From c32537d49faba69f5c2146f1ab6dbacf26bff133 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 7 May 2017 21:24:51 +0200 Subject: [PATCH 001/436] xtensa: Use seq_puts() in c_show() A string which did not contain a data format specification should be put into a sequence. Thus use the corresponding function "seq_puts". This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Max Filippov --- arch/xtensa/kernel/setup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 197e75b400b1..460a226699b4 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -594,8 +594,7 @@ c_show(struct seq_file *f, void *slot) (ccount_freq/10000) % 100, loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100); - - seq_printf(f,"flags\t\t: " + seq_puts(f, "flags\t\t: " #if XCHAL_HAVE_NMI "nmi " #endif From 5e78e465abcba4a21e5cc52bef41aa4b3361957d Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 25 Aug 2016 14:28:09 +0200 Subject: [PATCH 002/436] xtensa: ISS: Use kmalloc_array() in simdisk_init() * A multiplication for the size determination of a memory allocation indicated that an array data structure should be processed. Thus use the corresponding function "kmalloc_array". This issue was detected by using the Coccinelle software. * Replace the specification of a data type by a pointer dereference to make the corresponding size determination a bit safer according to the Linux coding style convention. Signed-off-by: Markus Elfring Signed-off-by: Max Filippov --- arch/xtensa/platforms/iss/simdisk.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 02e94bb3ad3e..c45b90bb9339 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -317,8 +317,7 @@ static int __init simdisk_init(void) if (simdisk_count > MAX_SIMDISK_COUNT) simdisk_count = MAX_SIMDISK_COUNT; - sddev = kmalloc(simdisk_count * sizeof(struct simdisk), - GFP_KERNEL); + sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL); if (sddev == NULL) goto out_unregister; From 38b8f823864707eb1cf331d2247608c419ed388c Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 3 May 2017 11:13:46 +0800 Subject: [PATCH 003/436] clk: sunxi-ng: a31: Correct lcd1-ch1 clock register offset The register offset for the lcd1-ch1 clock was incorrectly pointing to the lcd0-ch1 clock. This resulted in the lcd0-ch1 clock being disabled when the clk core disables unused clocks. This then stops the simplefb HDMI output path. 
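For context, the failure mode is easiest to see at the gate write itself. A minimal sketch, not part of the patch and with a hypothetical helper name: when both LCD channel descriptors point at offset 0x12c, the unused-clock sweep that gates lcd1-ch1 clears an enable bit that actually belongs to lcd0-ch1.

  /*
   * Sketch: disabling an "unused" gate through a duplicated register
   * offset. If two clocks claim the same reg/bit pair, gating one of
   * them silently gates the other as well.
   */
  static void ccu_gate_disable_sketch(void __iomem *base, u32 reg, u32 bit)
  {
          u32 val = readl(base + reg);

          writel(val & ~BIT(bit), base + reg);
  }

Pointing lcd1-ch1 at its own register (0x130) keeps the two gates independent again.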
Reported-by: Bob Ham Fixes: c6e6c96d8fa6 ("clk: sunxi-ng: Add A31/A31s clocks") Cc: stable@vger.kernel.org # 4.9.x- Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 89e68d29bf45..df97e25aec76 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents, 0x12c, 0, 4, 24, 3, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, - 0x12c, 0, 4, 24, 3, BIT(31), + 0x130, 0, 4, 24, 3, BIT(31), CLK_SET_RATE_PARENT); static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", From 7ffc781ec46ef1e9aedb482f5f04425bd8bb2753 Mon Sep 17 00:00:00 2001 From: Yong Deng Date: Fri, 5 May 2017 18:31:57 +0800 Subject: [PATCH 004/436] clk: sunxi-ng: v3s: Fix usb otg device reset bit V3S's usb otg device reset bit should be 24, not 23. Cc: stable@vger.kernel.org Signed-off-by: Yong Deng Reviewed-By: Icenowy Zheng Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index e58706b40ae9..6297add857b5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, - [RST_BUS_OTG] = { 0x2c0, BIT(23) }, + [RST_BUS_OTG] = { 0x2c0, BIT(24) }, [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, From a27b49b8afd41d53ab2429ebb122b78a4201fa0d Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Mon, 17 Apr 2017 18:34:49 +0800 Subject: [PATCH 005/436] ARM: sunxi: h3/h5: fix the compatible of R_CCU The R_CCU of H3/H5 currently wrongly used A64 R_CCU compatible. Fix it by changing it to the correct H3 compatible. Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard --- arch/arm/boot/dts/sunxi-h3-h5.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index 1aeeacb3a884..d0067fec99de 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -558,7 +558,7 @@ }; r_ccu: clock@1f01400 { - compatible = "allwinner,sun50i-a64-r-ccu"; + compatible = "allwinner,sun8i-h3-r-ccu"; reg = <0x01f01400 0x100>; clocks = <&osc24M>, <&osc32k>, <&iosc>; clock-names = "hosc", "losc", "iosc"; From dbed87a9d3a857a86f602775b5845f5f6d9652b5 Mon Sep 17 00:00:00 2001 From: Tobias Regnery Date: Mon, 24 Apr 2017 12:05:42 +0200 Subject: [PATCH 006/436] clk: meson: gxbb: fix build error without RESET_CONTROLLER With CONFIG_RESET_CONTROLLER=n we see the following link error in the meson gxbb clk driver: drivers/built-in.o: In function 'gxbb_aoclkc_probe': drivers/clk/meson/gxbb-aoclk.c:161: undefined reference to 'devm_reset_controller_register' Fix this by selecting the reset controller subsystem. 
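The link failure follows from how the probe path registers its reset controller: devm_reset_controller_register() only exists when the reset framework is built, with no inline stub to fall back on, so an unconditional caller has to pull the framework in via Kconfig. A reduced sketch of the calling pattern, using illustrative names rather than the driver's actual ones:

  /* Probe-time pattern that motivates 'select RESET_CONTROLLER': the
   * call below resolves to real framework code only when the reset
   * subsystem is compiled in.
   */
  static int aoclk_probe_sketch(struct platform_device *pdev,
                                struct reset_controller_dev *rstc)
  {
          rstc->owner = THIS_MODULE;
          return devm_reset_controller_register(&pdev->dev, rstc);
  }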
Fixes: f8c11f79912d ("clk: meson: Add GXBB AO Clock and Reset controller driver") Signed-off-by: Tobias Regnery Acked-by: Neil Armstrong [narmstrong: Added fixes-by tag] Signed-off-by: Neil Armstrong --- drivers/clk/meson/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index 19480bcc7046..2f29ee1a4d00 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B config COMMON_CLK_GXBB bool depends on COMMON_CLK_AMLOGIC + select RESET_CONTROLLER help Support for the clock controller on AmLogic S905 devices, aka gxbb. Say Y if you want peripherals and CPU frequency scaling to work. From c0e7bb38c07cbd8269549ee0a0566021a3c729de Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 15 May 2017 14:11:03 +0200 Subject: [PATCH 007/436] s390/kvm: do not rely on the ILC on kvm host protection fauls For most cases a protection exception in the host (e.g. copy on write or dirty tracking) on the sie instruction will indicate an instruction length of 4. Turns out that there are some corner cases (e.g. runtime instrumentation) where this is not necessarily true and the ILC is unpredictable. Let's replace our 4 byte rewind_pad with 3 byte nops to prepare for all possible ILCs. Signed-off-by: Christian Borntraeger Cc: stable@vger.kernel.org Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index e408d9cc5b96..6315037335ba 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -231,12 +231,17 @@ ENTRY(sie64a) lctlg %c1,%c1,__LC_USER_ASCE # load primary asce .Lsie_done: # some program checks are suppressing. C code (e.g. do_protection_exception) -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions between sie64a and .Lsie_done should not cause program -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There +# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. +# Other instructions between sie64a and .Lsie_done should not cause program +# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. # See also .Lcleanup_sie -.Lrewind_pad: - nop 0 +.Lrewind_pad6: + nopr 7 +.Lrewind_pad4: + nopr 7 +.Lrewind_pad2: + nopr 7 .globl sie_exit sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area @@ -249,7 +254,9 @@ sie_exit: stg %r14,__SF_EMPTY+16(%r15) # set exit reason code j sie_exit - EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(.Lrewind_pad6,.Lsie_fault) + EX_TABLE(.Lrewind_pad4,.Lsie_fault) + EX_TABLE(.Lrewind_pad2,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault) EXPORT_SYMBOL(sie64a) EXPORT_SYMBOL(sie_exit) From 4f1fcfe94c1700f9e891adc1ca364a5cef00bbfd Mon Sep 17 00:00:00 2001 From: Simon Xue Date: Wed, 3 May 2017 17:19:40 +0200 Subject: [PATCH 008/436] iommu/rockchip: Enable Rockchip IOMMU on ARM64 This patch makes it possible to compile the rockchip-iommu driver on ARM64, so that it can be used with 64-bit SoCs equipped with this type of IOMMU. 
Signed-off-by: Simon Xue Signed-off-by: Shunqian Zheng Signed-off-by: Tomasz Figa Reviewed-by: Matthias Brugger Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6ee3a25ae731..99c6366a2551 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -219,7 +219,7 @@ config OMAP_IOMMU_DEBUG config ROCKCHIP_IOMMU bool "Rockchip IOMMU Support" - depends on ARM + depends on ARM || ARM64 depends on ARCH_ROCKCHIP || COMPILE_TEST select IOMMU_API select ARM_DMA_USE_IOMMU From 15060aba717115dc9f204c02213a7c6bf341163e Mon Sep 17 00:00:00 2001 From: CQ Tang Date: Wed, 10 May 2017 11:39:03 -0700 Subject: [PATCH 009/436] iommu/vt-d: Helper function to query if a pasid has any active users A driver would need to know if there are any active references to a a PASID before cleaning up its resources. This function helps check if there are any active users of a PASID before it can perform any recovery on that device. To: Joerg Roedel To: linux-kernel@vger.kernel.org To: David Woodhouse Cc: Jean-Phillipe Brucker Cc: iommu@lists.linux-foundation.org Signed-off-by: CQ Tang Signed-off-by: Ashok Raj Signed-off-by: Joerg Roedel --- drivers/iommu/intel-svm.c | 30 ++++++++++++++++++++++++++++++ include/linux/intel-svm.h | 20 ++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 23c427602c55..f167c0d84ebf 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -489,6 +489,36 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) } EXPORT_SYMBOL_GPL(intel_svm_unbind_mm); +int intel_svm_is_pasid_valid(struct device *dev, int pasid) +{ + struct intel_iommu *iommu; + struct intel_svm *svm; + int ret = -EINVAL; + + mutex_lock(&pasid_mutex); + iommu = intel_svm_device_to_iommu(dev); + if (!iommu || !iommu->pasid_table) + goto out; + + svm = idr_find(&iommu->pasid_idr, pasid); + if (!svm) + goto out; + + /* init_mm is used in this case */ + if (!svm->mm) + ret = 1; + else if (atomic_read(&svm->mm->mm_users) > 0) + ret = 1; + else + ret = 0; + + out: + mutex_unlock(&pasid_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid); + /* Page request queue descriptor */ struct page_req_dsc { u64 srr:1; diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 3c25794042f9..99bc5b3ae26e 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -102,6 +102,21 @@ extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, */ extern int intel_svm_unbind_mm(struct device *dev, int pasid); +/** + * intel_svm_is_pasid_valid() - check if pasid is valid + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be checked + * + * This function checks if the specified pasid is still valid. A + * valid pasid means the backing mm is still having a valid user. + * For kernel callers init_mm is always valid. for other mm, if mm->mm_users + * is non-zero, it is valid. + * + * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid + * 1 if pasid is valid. 
+ */ +extern int intel_svm_is_pasid_valid(struct device *dev, int pasid); + #else /* CONFIG_INTEL_IOMMU_SVM */ static inline int intel_svm_bind_mm(struct device *dev, int *pasid, @@ -114,6 +129,11 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid) { BUG(); } + +static int intel_svm_is_pasid_valid(struct device *dev, int pasid) +{ + return -EINVAL; +} #endif /* CONFIG_INTEL_IOMMU_SVM */ #define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) From 6aa9a30838707f19cd5de4b8710987fbc747cdaf Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:06:27 +0900 Subject: [PATCH 010/436] iommu/ipmmu-vmsa: Remove platform data handling The IPMMU driver is using DT these days, and platform data is no longer used by the driver. Remove unused code. Signed-off-by: Magnus Damm Reviewed-by: Laurent Pinchart Reviewed-by: Joerg Roedel Reviewed-by: Geert Uytterhoeven Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index b7e14ee863f9..d10a8d5eee4f 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -768,11 +768,6 @@ static int ipmmu_probe(struct platform_device *pdev) int irq; int ret; - if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) { - dev_err(&pdev->dev, "missing platform data\n"); - return -EINVAL; - } - mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); if (!mmu) { dev_err(&pdev->dev, "cannot allocate device data\n"); From dbb7069223bed0c40511ee28e5be519405e47906 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:06:38 +0900 Subject: [PATCH 011/436] iommu/ipmmu-vmsa: Rework interrupt code and use bitmap for context Introduce a bitmap for context handing and convert the interrupt routine to handle all registered contexts. At this point the number of contexts are still limited. Also remove the use of the ARM specific mapping variable from ipmmu_irq() to allow compile on ARM64. Signed-off-by: Magnus Damm Reviewed-by: Joerg Roedel Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 76 +++++++++++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 10 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index d10a8d5eee4f..787b675b0e9f 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -8,6 +8,7 @@ * the Free Software Foundation; version 2 of the License. */ +#include #include #include #include @@ -26,12 +27,17 @@ #include "io-pgtable.h" +#define IPMMU_CTX_MAX 1 + struct ipmmu_vmsa_device { struct device *dev; void __iomem *base; struct list_head list; unsigned int num_utlbs; + spinlock_t lock; /* Protects ctx and domains[] */ + DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); + struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; struct dma_iommu_mapping *mapping; }; @@ -293,9 +299,29 @@ static struct iommu_gather_ops ipmmu_gather_ops = { * Domain/Context Management */ +static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, + struct ipmmu_vmsa_domain *domain) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mmu->lock, flags); + + ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX); + if (ret != IPMMU_CTX_MAX) { + mmu->domains[ret] = domain; + set_bit(ret, mmu->ctx); + } + + spin_unlock_irqrestore(&mmu->lock, flags); + + return ret; +} + static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) { u64 ttbr; + int ret; /* * Allocate the page table operations. 
@@ -327,10 +353,15 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) return -EINVAL; /* - * TODO: When adding support for multiple contexts, find an unused - * context. + * Find an unused context. */ - domain->context_id = 0; + ret = ipmmu_domain_allocate_context(domain->mmu, domain); + if (ret == IPMMU_CTX_MAX) { + free_io_pgtable_ops(domain->iop); + return -EBUSY; + } + + domain->context_id = ret; /* TTBR0 */ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; @@ -372,6 +403,19 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) return 0; } +static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, + unsigned int context_id) +{ + unsigned long flags; + + spin_lock_irqsave(&mmu->lock, flags); + + clear_bit(context_id, mmu->ctx); + mmu->domains[context_id] = NULL; + + spin_unlock_irqrestore(&mmu->lock, flags); +} + static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { /* @@ -382,6 +426,7 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) */ ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); ipmmu_tlb_sync(domain); + ipmmu_domain_free_context(domain->mmu, domain->context_id); } /* ----------------------------------------------------------------------------- @@ -439,16 +484,25 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) static irqreturn_t ipmmu_irq(int irq, void *dev) { struct ipmmu_vmsa_device *mmu = dev; - struct iommu_domain *io_domain; - struct ipmmu_vmsa_domain *domain; + irqreturn_t status = IRQ_NONE; + unsigned int i; + unsigned long flags; - if (!mmu->mapping) - return IRQ_NONE; + spin_lock_irqsave(&mmu->lock, flags); - io_domain = mmu->mapping->domain; - domain = to_vmsa_domain(io_domain); + /* + * Check interrupts for all active contexts. + */ + for (i = 0; i < IPMMU_CTX_MAX; i++) { + if (!mmu->domains[i]) + continue; + if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) + status = IRQ_HANDLED; + } - return ipmmu_domain_irq(domain); + spin_unlock_irqrestore(&mmu->lock, flags); + + return status; } /* ----------------------------------------------------------------------------- @@ -776,6 +830,8 @@ static int ipmmu_probe(struct platform_device *pdev) mmu->dev = &pdev->dev; mmu->num_utlbs = 32; + spin_lock_init(&mmu->lock); + bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); /* Map I/O memory and request IRQ. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); From 383fef5f4b56d8dc05cac12dc04262c8175331b1 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:06:48 +0900 Subject: [PATCH 012/436] iommu/ipmmu-vmsa: Break out utlb parsing code Break out the utlb parsing code and dev_data allocation into a separate function. This is preparation for future code sharing. 
Signed-off-by: Magnus Damm Reviewed-by: Joerg Roedel Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 64 ++++++++++++++++++++++++-------------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 787b675b0e9f..c4c35abc1c1a 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -649,22 +649,15 @@ static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, return 0; } -static int ipmmu_add_device(struct device *dev) +static int ipmmu_init_platform_device(struct device *dev) { struct ipmmu_vmsa_archdata *archdata; struct ipmmu_vmsa_device *mmu; - struct iommu_group *group = NULL; unsigned int *utlbs; unsigned int i; int num_utlbs; int ret = -ENODEV; - if (dev->archdata.iommu) { - dev_warn(dev, "IOMMU driver already assigned to device %s\n", - dev_name(dev)); - return -EINVAL; - } - /* Find the master corresponding to the device. */ num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus", @@ -701,6 +694,36 @@ static int ipmmu_add_device(struct device *dev) } } + archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); + if (!archdata) { + ret = -ENOMEM; + goto error; + } + + archdata->mmu = mmu; + archdata->utlbs = utlbs; + archdata->num_utlbs = num_utlbs; + dev->archdata.iommu = archdata; + return 0; + +error: + kfree(utlbs); + return ret; +} + +static int ipmmu_add_device(struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata; + struct ipmmu_vmsa_device *mmu = NULL; + struct iommu_group *group; + int ret; + + if (dev->archdata.iommu) { + dev_warn(dev, "IOMMU driver already assigned to device %s\n", + dev_name(dev)); + return -EINVAL; + } + /* Create a device group and add the device to it. */ group = iommu_group_alloc(); if (IS_ERR(group)) { @@ -718,16 +741,9 @@ static int ipmmu_add_device(struct device *dev) goto error; } - archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); - if (!archdata) { - ret = -ENOMEM; + ret = ipmmu_init_platform_device(dev); + if (ret < 0) goto error; - } - - archdata->mmu = mmu; - archdata->utlbs = utlbs; - archdata->num_utlbs = num_utlbs; - dev->archdata.iommu = archdata; /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate @@ -738,6 +754,8 @@ static int ipmmu_add_device(struct device *dev) * - Make the mapping size configurable ? We currently use a 2GB mapping * at a 1GB offset to ensure that NULL VAs will fault. */ + archdata = dev->archdata.iommu; + mmu = archdata->mmu; if (!mmu->mapping) { struct dma_iommu_mapping *mapping; @@ -762,16 +780,16 @@ static int ipmmu_add_device(struct device *dev) return 0; error: - arm_iommu_release_mapping(mmu->mapping); - - kfree(dev->archdata.iommu); - kfree(utlbs); - - dev->archdata.iommu = NULL; + if (mmu) + arm_iommu_release_mapping(mmu->mapping); if (!IS_ERR_OR_NULL(group)) iommu_group_remove_device(dev); + kfree(archdata->utlbs); + kfree(archdata); + dev->archdata.iommu = NULL; + return ret; } From 8e73bf659135ee7333d9a762bbdadb2ad794452f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:06:59 +0900 Subject: [PATCH 013/436] iommu/ipmmu-vmsa: Break out domain allocation code Break out the domain allocation code into a separate function. This is preparation for future code sharing. 
Signed-off-by: Magnus Damm Reviewed-by: Joerg Roedel Reviewed-by: Geert Uytterhoeven Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index c4c35abc1c1a..eb5008596051 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -509,13 +509,10 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) * IOMMU Operations */ -static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) { struct ipmmu_vmsa_domain *domain; - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; @@ -525,6 +522,14 @@ static struct iommu_domain *ipmmu_domain_alloc(unsigned type) return &domain->io_domain; } +static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +{ + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + return __ipmmu_domain_alloc(type); +} + static void ipmmu_domain_free(struct iommu_domain *io_domain) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); From 3ae47292024f85e82b769044c43f0bd13adcd7e8 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:07:10 +0900 Subject: [PATCH 014/436] iommu/ipmmu-vmsa: Add new IOMMU_DOMAIN_DMA ops Introduce an alternative set of iommu_ops suitable for 64-bit ARM as well as 32-bit ARM when CONFIG_IOMMU_DMA=y. Also adjust the Kconfig to depend on ARM or IOMMU_DMA. Initialize the device from ->xlate() when CONFIG_IOMMU_DMA=y. Signed-off-by: Magnus Damm Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 1 + drivers/iommu/ipmmu-vmsa.c | 164 +++++++++++++++++++++++++++++++++++-- 2 files changed, 156 insertions(+), 9 deletions(-) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6ee3a25ae731..504ba025a54c 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -274,6 +274,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" + depends on ARM || IOMMU_DMA depends on ARM_LPAE depends on ARCH_RENESAS || COMPILE_TEST select IOMMU_API diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index eb5008596051..8b648f6e0e4d 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -22,8 +23,10 @@ #include #include +#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) #include #include +#endif #include "io-pgtable.h" @@ -57,6 +60,8 @@ struct ipmmu_vmsa_archdata { struct ipmmu_vmsa_device *mmu; unsigned int *utlbs; unsigned int num_utlbs; + struct device *dev; + struct list_head list; }; static DEFINE_SPINLOCK(ipmmu_devices_lock); @@ -522,14 +527,6 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) return &domain->io_domain; } -static struct iommu_domain *ipmmu_domain_alloc(unsigned type) -{ - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - - return __ipmmu_domain_alloc(type); -} - static void ipmmu_domain_free(struct iommu_domain *io_domain) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); @@ -572,7 +569,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", dev_name(mmu->dev), dev_name(domain->mmu->dev)); ret = -EINVAL; - } + } else + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); spin_unlock_irqrestore(&domain->lock, flags); @@ -708,6 +706,7 @@ static int 
ipmmu_init_platform_device(struct device *dev) archdata->mmu = mmu; archdata->utlbs = utlbs; archdata->num_utlbs = num_utlbs; + archdata->dev = dev; dev->archdata.iommu = archdata; return 0; @@ -716,6 +715,16 @@ error: return ret; } +#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) + +static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +{ + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + return __ipmmu_domain_alloc(type); +} + static int ipmmu_add_device(struct device *dev) { struct ipmmu_vmsa_archdata *archdata; @@ -825,6 +834,141 @@ static const struct iommu_ops ipmmu_ops = { .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, }; +#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */ + +#ifdef CONFIG_IOMMU_DMA + +static DEFINE_SPINLOCK(ipmmu_slave_devices_lock); +static LIST_HEAD(ipmmu_slave_devices); + +static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type) +{ + struct iommu_domain *io_domain = NULL; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + io_domain = __ipmmu_domain_alloc(type); + break; + + case IOMMU_DOMAIN_DMA: + io_domain = __ipmmu_domain_alloc(type); + if (io_domain) + iommu_get_dma_cookie(io_domain); + break; + } + + return io_domain; +} + +static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) +{ + switch (io_domain->type) { + case IOMMU_DOMAIN_DMA: + iommu_put_dma_cookie(io_domain); + /* fall-through */ + default: + ipmmu_domain_free(io_domain); + break; + } +} + +static int ipmmu_add_device_dma(struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct iommu_group *group; + + /* The device has been verified in xlate() */ + if (!archdata) + return -ENODEV; + + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + spin_lock(&ipmmu_slave_devices_lock); + list_add(&archdata->list, &ipmmu_slave_devices); + spin_unlock(&ipmmu_slave_devices_lock); + return 0; +} + +static void ipmmu_remove_device_dma(struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + + spin_lock(&ipmmu_slave_devices_lock); + list_del(&archdata->list); + spin_unlock(&ipmmu_slave_devices_lock); + + iommu_group_remove_device(dev); +} + +static struct device *ipmmu_find_sibling_device(struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_archdata *sibling_archdata = NULL; + bool found = false; + + spin_lock(&ipmmu_slave_devices_lock); + + list_for_each_entry(sibling_archdata, &ipmmu_slave_devices, list) { + if (archdata == sibling_archdata) + continue; + if (sibling_archdata->mmu == archdata->mmu) { + found = true; + break; + } + } + + spin_unlock(&ipmmu_slave_devices_lock); + + return found ? 
sibling_archdata->dev : NULL; +} + +static struct iommu_group *ipmmu_find_group_dma(struct device *dev) +{ + struct iommu_group *group; + struct device *sibling; + + sibling = ipmmu_find_sibling_device(dev); + if (sibling) + group = iommu_group_get(sibling); + if (!sibling || IS_ERR(group)) + group = generic_device_group(dev); + + return group; +} + +static int ipmmu_of_xlate_dma(struct device *dev, + struct of_phandle_args *spec) +{ + /* If the IPMMU device is disabled in DT then return error + * to make sure the of_iommu code does not install ops + * even though the iommu device is disabled + */ + if (!of_device_is_available(spec->np)) + return -ENODEV; + + return ipmmu_init_platform_device(dev); +} + +static const struct iommu_ops ipmmu_ops = { + .domain_alloc = ipmmu_domain_alloc_dma, + .domain_free = ipmmu_domain_free_dma, + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = ipmmu_iova_to_phys, + .add_device = ipmmu_add_device_dma, + .remove_device = ipmmu_remove_device_dma, + .device_group = ipmmu_find_group_dma, + .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, + .of_xlate = ipmmu_of_xlate_dma, +}; + +#endif /* CONFIG_IOMMU_DMA */ + /* ----------------------------------------------------------------------------- * Probe/remove and init */ @@ -914,7 +1058,9 @@ static int ipmmu_remove(struct platform_device *pdev) list_del(&mmu->list); spin_unlock(&ipmmu_devices_lock); +#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) arm_iommu_release_mapping(mmu->mapping); +#endif ipmmu_device_reset(mmu); From 0fbc8b04c34fef1d730712135c504978c939a5a3 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:07:20 +0900 Subject: [PATCH 015/436] iommu/ipmmu-vmsa: Use fwspec iommu_priv on ARM64 Convert from archdata to iommu_priv via iommu_fwspec on ARM64 but let 32-bit ARM keep on using archdata for now. Once the 32-bit ARM code and the IPMMU driver is able to move over to CONFIG_IOMMU_DMA=y then coverting to fwspec via ->of_xlate() will be easy. For now fwspec ids and num_ids are not used to allow code sharing between 32-bit and 64-bit ARM code inside the driver. 
Signed-off-by: Magnus Damm Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 97 +++++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 39 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 8b648f6e0e4d..a04babbd7de9 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -56,7 +56,7 @@ struct ipmmu_vmsa_domain { spinlock_t lock; /* Protects mappings */ }; -struct ipmmu_vmsa_archdata { +struct ipmmu_vmsa_iommu_priv { struct ipmmu_vmsa_device *mmu; unsigned int *utlbs; unsigned int num_utlbs; @@ -72,6 +72,24 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) return container_of(dom, struct ipmmu_vmsa_domain, io_domain); } + +static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev) +{ +#if defined(CONFIG_ARM) + return dev->archdata.iommu; +#else + return dev->iommu_fwspec->iommu_priv; +#endif +} +static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p) +{ +#if defined(CONFIG_ARM) + dev->archdata.iommu = p; +#else + dev->iommu_fwspec->iommu_priv = p; +#endif +} + #define TLB_LOOP_TIMEOUT 100 /* 100us */ /* ----------------------------------------------------------------------------- @@ -543,8 +561,8 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain) static int ipmmu_attach_device(struct iommu_domain *io_domain, struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; - struct ipmmu_vmsa_device *mmu = archdata->mmu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + struct ipmmu_vmsa_device *mmu = priv->mmu; struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); unsigned long flags; unsigned int i; @@ -577,8 +595,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, if (ret < 0) return ret; - for (i = 0; i < archdata->num_utlbs; ++i) - ipmmu_utlb_enable(domain, archdata->utlbs[i]); + for (i = 0; i < priv->num_utlbs; ++i) + ipmmu_utlb_enable(domain, priv->utlbs[i]); return 0; } @@ -586,12 +604,12 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, static void ipmmu_detach_device(struct iommu_domain *io_domain, struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); unsigned int i; - for (i = 0; i < archdata->num_utlbs; ++i) - ipmmu_utlb_disable(domain, archdata->utlbs[i]); + for (i = 0; i < priv->num_utlbs; ++i) + ipmmu_utlb_disable(domain, priv->utlbs[i]); /* * TODO: Optimize by disabling the context when no device is attached. 
@@ -654,7 +672,7 @@ static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, static int ipmmu_init_platform_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata; + struct ipmmu_vmsa_iommu_priv *priv; struct ipmmu_vmsa_device *mmu; unsigned int *utlbs; unsigned int i; @@ -697,17 +715,17 @@ static int ipmmu_init_platform_device(struct device *dev) } } - archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); - if (!archdata) { + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { ret = -ENOMEM; goto error; } - archdata->mmu = mmu; - archdata->utlbs = utlbs; - archdata->num_utlbs = num_utlbs; - archdata->dev = dev; - dev->archdata.iommu = archdata; + priv->mmu = mmu; + priv->utlbs = utlbs; + priv->num_utlbs = num_utlbs; + priv->dev = dev; + set_priv(dev, priv); return 0; error: @@ -727,12 +745,11 @@ static struct iommu_domain *ipmmu_domain_alloc(unsigned type) static int ipmmu_add_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata; struct ipmmu_vmsa_device *mmu = NULL; struct iommu_group *group; int ret; - if (dev->archdata.iommu) { + if (to_priv(dev)) { dev_warn(dev, "IOMMU driver already assigned to device %s\n", dev_name(dev)); return -EINVAL; @@ -768,8 +785,7 @@ static int ipmmu_add_device(struct device *dev) * - Make the mapping size configurable ? We currently use a 2GB mapping * at a 1GB offset to ensure that NULL VAs will fault. */ - archdata = dev->archdata.iommu; - mmu = archdata->mmu; + mmu = to_priv(dev)->mmu; if (!mmu->mapping) { struct dma_iommu_mapping *mapping; @@ -800,24 +816,24 @@ error: if (!IS_ERR_OR_NULL(group)) iommu_group_remove_device(dev); - kfree(archdata->utlbs); - kfree(archdata); - dev->archdata.iommu = NULL; + kfree(to_priv(dev)->utlbs); + kfree(to_priv(dev)); + set_priv(dev, NULL); return ret; } static void ipmmu_remove_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); arm_iommu_detach_device(dev); iommu_group_remove_device(dev); - kfree(archdata->utlbs); - kfree(archdata); + kfree(priv->utlbs); + kfree(priv); - dev->archdata.iommu = NULL; + set_priv(dev, NULL); } static const struct iommu_ops ipmmu_ops = { @@ -874,11 +890,14 @@ static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) static int ipmmu_add_device_dma(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct iommu_group *group; - /* The device has been verified in xlate() */ - if (!archdata) + /* + * Only let through devices that have been verified in xlate() + * We may get called with dev->iommu_fwspec set to NULL. 
+ */ + if (!fwspec || !fwspec->iommu_priv) return -ENODEV; group = iommu_group_get_for_dev(dev); @@ -886,17 +905,17 @@ static int ipmmu_add_device_dma(struct device *dev) return PTR_ERR(group); spin_lock(&ipmmu_slave_devices_lock); - list_add(&archdata->list, &ipmmu_slave_devices); + list_add(&to_priv(dev)->list, &ipmmu_slave_devices); spin_unlock(&ipmmu_slave_devices_lock); return 0; } static void ipmmu_remove_device_dma(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); spin_lock(&ipmmu_slave_devices_lock); - list_del(&archdata->list); + list_del(&priv->list); spin_unlock(&ipmmu_slave_devices_lock); iommu_group_remove_device(dev); @@ -904,16 +923,16 @@ static void ipmmu_remove_device_dma(struct device *dev) static struct device *ipmmu_find_sibling_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; - struct ipmmu_vmsa_archdata *sibling_archdata = NULL; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL; bool found = false; spin_lock(&ipmmu_slave_devices_lock); - list_for_each_entry(sibling_archdata, &ipmmu_slave_devices, list) { - if (archdata == sibling_archdata) + list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) { + if (priv == sibling_priv) continue; - if (sibling_archdata->mmu == archdata->mmu) { + if (sibling_priv->mmu == priv->mmu) { found = true; break; } @@ -921,7 +940,7 @@ static struct device *ipmmu_find_sibling_device(struct device *dev) spin_unlock(&ipmmu_slave_devices_lock); - return found ? sibling_archdata->dev : NULL; + return found ? sibling_priv->dev : NULL; } static struct iommu_group *ipmmu_find_group_dma(struct device *dev) From d74c67d46b8e03883326163587de783adf61ef6a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:07:30 +0900 Subject: [PATCH 016/436] iommu/ipmmu-vmsa: Drop LPAE Kconfig dependency Neither the ARM page table code enabled by IOMMU_IO_PGTABLE_LPAE nor the IPMMU_VMSA driver actually depends on ARM_LPAE, so get rid of the dependency. Tested with ipmmu-vmsa on r8a7794 ALT and a kernel config using: # CONFIG_ARM_LPAE is not set Signed-off-by: Magnus Damm Acked-by: Laurent Pinchart Reviewed-by: Joerg Roedel Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 504ba025a54c..45cb1905b9bc 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -275,7 +275,6 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARM || IOMMU_DMA - depends on ARM_LPAE depends on ARCH_RENESAS || COMPILE_TEST select IOMMU_API select IOMMU_IO_PGTABLE_LPAE From 26b6aec6e726597149b4bcd4cc8583f402f3da14 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 17 May 2017 19:07:41 +0900 Subject: [PATCH 017/436] iommu/ipmmu-vmsa: Fix pgsize_bitmap semicolon typo Fix comma-instead-of-semicolon typo error present in the latest version of the IPMMU driver. Signed-off-by: Magnus Damm Reviewed-by: Geert Uytterhoeven Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index a04babbd7de9..2a38aa15be17 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -358,7 +358,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) * non-secure mode. 
*/ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; - domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, + domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.ias = 32; domain->cfg.oas = 40; domain->cfg.tlb = &ipmmu_gather_ops; From 757c370f036e1f9f9a816cd481a13cdbcb346eb9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 16 May 2017 12:26:48 +0100 Subject: [PATCH 018/436] iommu/iova: Sort out rbtree limit_pfn handling When walking the rbtree, the fact that iovad->start_pfn and limit_pfn are both inclusive limits creates an ambiguity once limit_pfn reaches the bottom of the address space and they overlap. Commit 5016bdb796b3 ("iommu/iova: Fix underflow bug in __alloc_and_insert_iova_range") fixed the worst side-effect of this, that of underflow wraparound leading to bogus allocations, but the remaining fallout is that any attempt to allocate start_pfn itself erroneously fails. The cleanest way to resolve the ambiguity is to simply make limit_pfn an exclusive limit when inside the guts of the rbtree. Since we're working with PFNs, representing one past the top of the address space is always possible without fear of overflow, and elsewhere it just makes life a little more straightforward. Reported-by: Aaron Sierra Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 2 +- drivers/iommu/iova.c | 21 +++++++++------------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 8348f366ddd1..aaf6fa304240 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -314,7 +314,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, * If we have devices with different DMA masks, move the free * area cache limit down for the benefit of the smaller one. 
*/ - iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); + iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn); return 0; } diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 5c88ba70e4e0..3f24c9a831c9 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -48,7 +48,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->cached32_node = NULL; iovad->granule = granule; iovad->start_pfn = start_pfn; - iovad->dma_32bit_pfn = pfn_32bit; + iovad->dma_32bit_pfn = pfn_32bit + 1; init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); @@ -63,7 +63,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) struct rb_node *prev_node = rb_prev(iovad->cached32_node); struct iova *curr_iova = rb_entry(iovad->cached32_node, struct iova, node); - *limit_pfn = curr_iova->pfn_lo - 1; + *limit_pfn = curr_iova->pfn_lo; return prev_node; } } @@ -135,7 +135,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova, static unsigned int iova_get_pad_size(unsigned int size, unsigned int limit_pfn) { - return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); + return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1); } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, @@ -155,18 +155,15 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, while (curr) { struct iova *curr_iova = rb_entry(curr, struct iova, node); - if (limit_pfn < curr_iova->pfn_lo) + if (limit_pfn <= curr_iova->pfn_lo) { goto move_left; - else if (limit_pfn < curr_iova->pfn_hi) - goto adjust_limit_pfn; - else { + } else if (limit_pfn > curr_iova->pfn_hi) { if (size_aligned) pad_size = iova_get_pad_size(size, limit_pfn); - if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) + if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn) break; /* found a free slot */ } -adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; + limit_pfn = curr_iova->pfn_lo; move_left: prev = curr; curr = rb_prev(curr); @@ -182,7 +179,7 @@ move_left: } /* pfn_lo will point to size aligned address if size_aligned is set */ - new->pfn_lo = limit_pfn - (size + pad_size) + 1; + new->pfn_lo = limit_pfn - (size + pad_size); new->pfn_hi = new->pfn_lo + size - 1; /* If we have 'prev', it's a valid place to start the insertion. */ @@ -269,7 +266,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, + ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, new_iova, size_aligned); if (ret) { From f36afd38c5372a7cd5b363786fd62076c8b28427 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 17 May 2017 23:19:01 +0200 Subject: [PATCH 019/436] clk: sunxi-ng: enable SUNXI_CCU_MP for PRCM The newly added PRCM CCU driver uses SUNXI_CCU_MP_WITH_MUX_GATE, which causes a link error when no other driver enables SUNXI_CCU_MP: drivers/clk/built-in.o:(.data+0x5c8c8): undefined reference to `ccu_mp_ops' This adds an explicit 'select' statement for it. 
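The undefined reference comes from the clock declaration macros themselves: SUNXI_CCU_MP_WITH_MUX_GATE expands to a static descriptor that takes the address of ccu_mp_ops, and that symbol is only built when SUNXI_CCU_MP is enabled. A rough, abbreviated sketch of the expansion (names and fields simplified, not the driver's actual declaration):

  static const char * const ar100_parents_sketch[] = { "osc24M", "osc32k" };

  /* The &ccu_mp_ops reference below is what creates the hard
   * link-time dependency on the SUNXI_CCU_MP object file.
   */
  static struct ccu_mp ar100_clk_sketch = {
          .common = {
                  .hw.init = CLK_HW_INIT_PARENTS("ar100-sketch",
                                                 ar100_parents_sketch,
                                                 &ccu_mp_ops, 0),
          },
  };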
Signed-off-by: Arnd Bergmann Reviewed-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index b0d551a8efe4..eb89c7801f00 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -156,6 +156,7 @@ config SUN8I_R_CCU bool "Support for Allwinner SoCs' PRCM CCUs" select SUNXI_CCU_DIV select SUNXI_CCU_GATE + select SUNXI_CCU_MP default MACH_SUN8I || (ARCH_SUNXI && ARM64) endif From 36d4d68cd658d914ef73ac845705c4a89e7d9e2f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 5 Apr 2017 16:26:17 +0200 Subject: [PATCH 020/436] batman-adv: Fix rx packet/bytes stats on local ARP reply The stats are generated by batadv_interface_stats and must not be stored directly in the net_device stats member variable. The batadv_priv bat_counters information is assembled when ndo_get_stats is called. The stats previously stored in net_device::stats is then overwritten. The batman-adv counters must therefore be increased when an ARP packet is answered locally via the distributed arp table. Fixes: c384ea3ec930 ("batman-adv: Distributed ARP Table - add snooping functions for ARP messages") Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/distributed-arp-table.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 013e970eff39..000ca2f113ab 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, skb_new->protocol = eth_type_trans(skb_new, soft_iface); - soft_iface->stats.rx_packets++; - soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; + batadv_inc_counter(bat_priv, BATADV_CNT_RX); + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, + skb->len + ETH_HLEN + hdr_size); netif_rx(skb_new); batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); From a1a745ef980a1b48299ead4ea7990e62c0516f6e Mon Sep 17 00:00:00 2001 From: Andreas Pape Date: Fri, 19 May 2017 10:01:42 +0200 Subject: [PATCH 021/436] batman-adv: fix memory leak when dropping packet from other gateway The skb must be released in the receive handler since b91a2543b4c1 ("batman-adv: Consume skb in receive handlers"). Just returning NET_RX_DROP will no longer automatically free the memory. This results in memory leaks when unicast packets from other backbones must be dropped because they share a common backbone. 
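Since the receive handlers were converted to always consume the skb, returning NET_RX_DROP without freeing leaks the buffer, so every drop path has to release it explicitly. A minimal sketch of the expected pattern, with hypothetical check and handler names:

  /* Receive handlers own the skb on every path: error exits must
   * kfree_skb() it themselves instead of only returning NET_RX_DROP.
   */
  static int recv_sketch(struct sk_buff *skb)
  {
          if (packet_from_own_backbone(skb))      /* hypothetical check */
                  goto free_skb;

          return handle_packet(skb);              /* hypothetical, consumes skb */

  free_skb:
          kfree_skb(skb);
          return NET_RX_DROP;
  }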
Fixes: 9e794b6bf4a2 ("batman-adv: drop unicast packets from other backbone gw") Signed-off-by: Andreas Pape [sven@narfation.org: adjust commit message] Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/routing.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index e1ebe14ee2a6..ae9f4d37d34f 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, batadv_dbg(BATADV_DBG_BLA, bat_priv, "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", orig_addr_gw); - return NET_RX_DROP; + goto free_skb; } } From 623d8c095cb3351564d4dd8e3b449163a07cbd47 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 15 May 2017 10:50:00 +0200 Subject: [PATCH 022/436] arm64: allwinner: h5: Remove syslink to shared DTSI The arm64 H5 and arm H3 SoCs share roughly the same base, and therefore share a significant part of their device tree. The approach we took was to add a symlink from the arm64 DTSI to the arm DTSI. Now that the arm DT folder is exposed in the include path, we can just use it and remove our symlink. Acked-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi | 2 +- arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 120000 arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi index 4d314a253fd9..732e2e06f503 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi @@ -40,7 +40,7 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "sunxi-h3-h5.dtsi" +#include / { cpus { diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi deleted file mode 120000 index 036f01dc2b9b..000000000000 --- a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi +++ /dev/null @@ -1 +0,0 @@ -../../../../arm/boot/dts/sunxi-h3-h5.dtsi \ No newline at end of file From 5bf185366bfca4ab7730c607f530188e1591a4c0 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Mon, 15 May 2017 17:28:34 +0200 Subject: [PATCH 023/436] s390/vfio_ccw: make some symbols static Make some symbols static to fix sparse warnings like: drivers/s390/cio/vfio_ccw_ops.c:73:1: warning: symbol 'mdev_type_attr_name' was not declared. Should it be static? 
Signed-off-by: Sebastian Ott Reviewed-by: Dong Jia Shi Signed-off-by: Cornelia Huck --- drivers/s390/cio/vfio_ccw_ops.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index e72abbc18ee3..a66a317f3e4f 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf) { return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); } -MDEV_TYPE_ATTR_RO(name); +static MDEV_TYPE_ATTR_RO(name); static ssize_t device_api_show(struct kobject *kobj, struct device *dev, char *buf) { return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); } -MDEV_TYPE_ATTR_RO(device_api); +static MDEV_TYPE_ATTR_RO(device_api); static ssize_t available_instances_show(struct kobject *kobj, struct device *dev, char *buf) @@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj, return sprintf(buf, "%d\n", atomic_read(&private->avail)); } -MDEV_TYPE_ATTR_RO(available_instances); +static MDEV_TYPE_ATTR_RO(available_instances); static struct attribute *mdev_types_attrs[] = { &mdev_type_attr_name.attr, @@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = { .attrs = mdev_types_attrs, }; -struct attribute_group *mdev_type_groups[] = { +static struct attribute_group *mdev_type_groups[] = { &mdev_type_group, NULL, }; @@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev) &events, &private->nb); } -void vfio_ccw_mdev_release(struct mdev_device *mdev) +static void vfio_ccw_mdev_release(struct mdev_device *mdev) { struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); @@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, } } -int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) +static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) { if (info->index != VFIO_CCW_IO_IRQ_INDEX) return -EINVAL; From 370d9192719e6c174167888cf9240df2542e3b4b Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 24 May 2017 18:34:29 +0200 Subject: [PATCH 024/436] clk: sunxi-ng: sun5i: Fix ahb_bist_clk definition AHB BIST gate is actually controlled with bit 7. This bug was detected while trying to use the NAND controller which is using the DMA engine to transfer data to the NAND. Since the ahb_bist_clk gate bit conflicts with the ahb_dma_clk gate bit, the core was disabling the DMA engine clock as part of its 'disable unused clks' procedure, which was causing all DMA transfers to fail after this point. 
Fixes: 5e73761786d6 ("clk: sunxi-ng: Add sun5i CCU driver") Cc: stable@vger.kernel.org Reported-by: Angus Ainslie Signed-off-by: Boris Brezillon Tested-by: Angus Ainslie Reviewed-by: Chen-Yu Tsai Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/1495643669-28221-1-git-send-email-boris.brezillon@free-electrons.com --- drivers/clk/sunxi-ng/ccu-sun5i.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5c476f966a72..5372bf8be5e6 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb", static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", 0x060, BIT(6), 0); static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", - 0x060, BIT(6), 0); + 0x060, BIT(7), 0); static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", 0x060, BIT(8), 0); static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", From 56b74ed9c1e8050408b9beeee25888a49a458997 Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Mon, 22 May 2017 11:01:52 +0200 Subject: [PATCH 025/436] ARM: dts: am335x-sl50: Fix card detect pin for mmc1 The second version of the hardware moved the card detect pin from gpio0_6 to gpio1_9, as we won't support the first hardware version fix the pinmux configuration of this pin. Fixes: 8584d4fc ("ARM: dts: am335x-sl50: Add Toby-Churchill SL50 board support.") Cc: # 4.11 Signed-off-by: Enric Balletbo i Serra Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-sl50.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts index c5d2589c55fc..da4f8e376b9f 100644 --- a/arch/arm/boot/dts/am335x-sl50.dts +++ b/arch/arm/boot/dts/am335x-sl50.dts @@ -220,7 +220,7 @@ mmc1_pins: pinmux_mmc1_pins { pinctrl-single,pins = < - AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ + AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */ >; }; @@ -384,7 +384,7 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; bus-width = <4>; - cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; + cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; vmmc-supply = <&vmmcsd_fixed>; }; From db145db99f5bf30acc12d7450b9ad0707072a7be Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Mon, 22 May 2017 11:01:53 +0200 Subject: [PATCH 026/436] ARM: dts: am335x-sl50: Fix cannot claim requested pins for spi0 We don't need to bitbang these pins anymore, instead we muxed these pins as SPI, after this change, done in commit 6c69f726, we introduced the following error: pinctrl-single 44e10800.pinmux: pin PIN85 already requested \ by 44e10800.pinmux; cannot claim for 48030000.spi pinctrl-single 44e10800.pinmux: pin-85 (48030000.spi) status -22 Fixes: 6c69f726 ("ARM: dts: am335x-sl50: Enable SPI0 interface and Flash Memory") Cc: # 4.11 Signed-off-by: Enric Balletbo i Serra Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-sl50.dts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts index da4f8e376b9f..fc864a855991 100644 --- a/arch/arm/boot/dts/am335x-sl50.dts +++ b/arch/arm/boot/dts/am335x-sl50.dts @@ -280,10 +280,6 @@ AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */ AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */ AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */ - /* AVR 
Programming - SPI Bus (bit bang) - Screen and Keyboard */ - AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */ - AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */ - AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */ /* PDI Bus - Battery system */ AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */ AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */ From 449e2f9e95d803961635d725f6482e5e995b7554 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 23 May 2017 12:36:58 -0700 Subject: [PATCH 027/436] PCI: Make error code types consistent in pci_{read,write}_config_* Callers normally treat the config space accessors as returning PCBIOS_* error codes, not Linux error codes (or they don't look at them at all). We have pcibios_err_to_errno() in case the error code needs to be translated. Fixes: 4b1038834739 ("PCI: Don't attempt config access to disconnected devices") Signed-off-by: Brian Norris Signed-off-by: Bjorn Helgaas Reviewed-by: Keith Busch --- drivers/pci/access.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 74cf5fffb1e1..c80e37a69305 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) { if (pci_dev_is_disconnected(dev)) { *val = ~0; - return -ENODEV; + return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); } @@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) { if (pci_dev_is_disconnected(dev)) { *val = ~0; - return -ENODEV; + return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); } @@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, { if (pci_dev_is_disconnected(dev)) { *val = ~0; - return -ENODEV; + return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); } @@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) { if (pci_dev_is_disconnected(dev)) - return -ENODEV; + return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_byte); @@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) { if (pci_dev_is_disconnected(dev)) - return -ENODEV; + return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_word); @@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val) { if (pci_dev_is_disconnected(dev)) - return -ENODEV; + return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); From 95264c8c6a9040e84edda883dbbe9d193c41f46c Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 27 May 2017 01:53:04 +0300 Subject: [PATCH 028/436] staging: iio: ad7152: Fix deadlock in ad7152_write_raw_samp_freq() ad7152_write_raw_samp_freq() is called by ad7152_write_raw() with chip->state_lock held. So, there is unavoidable deadlock when ad7152_write_raw_samp_freq() locks the mutex itself. 
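A minimal sketch of that deadlock, outside the driver: Linux mutexes are not recursive, so the nested acquisition never returns (or trips lockdep when it is enabled).

  static void nested_lock_sketch(struct mutex *state_lock)
  {
          mutex_lock(state_lock);         /* taken in ad7152_write_raw()         */
          mutex_lock(state_lock);         /* taken again in the samp_freq helper */
          mutex_unlock(state_lock);       /* never reached                       */
          mutex_unlock(state_lock);
  }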
The patch removes unneeded locking. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Fixes: 6572389bcc11 ("staging: iio: cdc: ad7152: Implement IIO_CHAN_INFO_SAMP_FREQ attribute") Acked-by: Lars-Peter Clausen Cc: Signed-off-by: Jonathan Cameron --- drivers/staging/iio/cdc/ad7152.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c index dc6ecd824365..ff10d1f0a7e4 100644 --- a/drivers/staging/iio/cdc/ad7152.c +++ b/drivers/staging/iio/cdc/ad7152.c @@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val) if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; - mutex_lock(&chip->state_lock); ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); - if (ret < 0) { - mutex_unlock(&chip->state_lock); + if (ret < 0) return ret; - } chip->filter_rate_setup = i; - mutex_unlock(&chip->state_lock); return ret; } From 2f2724630f7a8d582470f03ee56b96746767d270 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 22 May 2017 16:25:14 +1000 Subject: [PATCH 029/436] KVM: PPC: Book3S HV: Cope with host using large decrementer mode POWER9 introduces a new mode for the decrementer register, called large decrementer mode, in which the decrementer counter is 56 bits wide rather than 32, and reads are sign-extended rather than zero-extended. For the decrementer, this new mode is optional and controlled by a bit in the LPCR. The hypervisor decrementer (HDEC) is 56 bits wide on POWER9 and has no mode control. Since KVM code reads and writes the decrementer and hypervisor decrementer registers in a few places, it needs to be aware of the need to treat the decrementer value as a 64-bit quantity, and only do a 32-bit sign extension when large decrementer mode is not in effect. Similarly, the HDEC should always be treated as a 64-bit quantity on POWER9. We define a new EXTEND_HDEC macro to encapsulate the feature test for POWER9 and the sign extension. To enable the sign extension to be removed in large decrementer mode, we test the LPCR_LD bit in the host LPCR image stored in the struct kvm for the guest. If is set then large decrementer mode is enabled and the sign extension should be skipped. This is partly based on an earlier patch by Oliver O'Halloran. Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_interrupts.S | 12 +++++++++++- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 23 +++++++++++++++++------ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 0fdc4a28970b..404deb512844 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) * Put whatever is in the decrementer into the * hypervisor decrementer. */ +BEGIN_FTR_SECTION + ld r5, HSTATE_KVM_VCORE(r13) + ld r6, VCORE_KVM(r5) + ld r9, KVM_HOST_LPCR(r6) + andis. 
r9, r9, LPCR_LD@h +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) mfspr r8,SPRN_DEC mftb r7 - mtspr SPRN_HDEC,r8 +BEGIN_FTR_SECTION + /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */ + bne 32f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) extsw r8,r8 +32: mtspr SPRN_HDEC,r8 add r8,r8,r7 std r8,HSTATE_DECEXP(r13) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bdb3f76ceb6b..e390b383b4d6 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -32,6 +32,12 @@ #include #include +/* Sign-extend HDEC if not on POWER9 */ +#define EXTEND_HDEC(reg) \ +BEGIN_FTR_SECTION; \ + extsw reg, reg; \ +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) + #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) /* Values in HSTATE_NAPPING(r13) */ @@ -214,6 +220,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) kvmppc_primary_no_guest: /* We handle this much like a ceded vcpu */ /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ + /* HDEC may be larger than DEC for arch >= v3.00, but since the */ + /* HDEC value came from DEC in the first place, it will fit */ mfspr r3, SPRN_HDEC mtspr SPRN_DEC, r3 /* @@ -295,8 +303,9 @@ kvm_novcpu_wakeup: /* See if our timeslice has expired (HDEC is negative) */ mfspr r0, SPRN_HDEC + EXTEND_HDEC(r0) li r12, BOOK3S_INTERRUPT_HV_DECREMENTER - cmpwi r0, 0 + cmpdi r0, 0 blt kvm_novcpu_exit /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ @@ -390,8 +399,8 @@ kvm_secondary_got_guest: lbz r4, HSTATE_PTID(r13) cmpwi r4, 0 bne 63f - lis r6, 0x7fff - ori r6, r6, 0xffff + LOAD_REG_ADDR(r6, decrementer_max) + ld r6, 0(r6) mtspr SPRN_HDEC, r6 /* and set per-LPAR registers, if doing dynamic micro-threading */ ld r6, HSTATE_SPLIT_MODE(r13) @@ -968,7 +977,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) /* Check if HDEC expires soon */ mfspr r3, SPRN_HDEC - cmpwi r3, 512 /* 1 microsecond */ + EXTEND_HDEC(r3) + cmpdi r3, 512 /* 1 microsecond */ blt hdec_soon #ifdef CONFIG_KVM_XICS @@ -2366,12 +2376,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) mfspr r3, SPRN_DEC mfspr r4, SPRN_HDEC mftb r5 - cmpw r3, r4 + extsw r3, r3 + EXTEND_HDEC(r4) + cmpd r3, r4 ble 67f mtspr SPRN_DEC, r4 67: /* save expiry time of guest decrementer */ - extsw r3, r3 add r3, r3, r5 ld r4, HSTATE_KVM_VCPU(r13) ld r5, HSTATE_KVM_VCORE(r13) From b316d02a13c3a8ebe681b7232defbdf3fa28c41b Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 22 May 2017 18:28:51 +0800 Subject: [PATCH 030/436] iommu/vt-d: Unwrap __get_valid_domain_for_dev() We do find_domain() in __get_valid_domain_for_dev(), while we do the same thing in get_valid_domain_for_dev(). No need to do it twice. 
Signed-off-by: Peter Xu Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 90ab0115d78e..ee9c258d3ae0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2387,7 +2387,7 @@ static struct dmar_domain *find_domain(struct device *dev) /* No lock here, assumes no domain exit in normal case */ info = dev->archdata.iommu; - if (info) + if (likely(info)) return info->domain; return NULL; } @@ -3475,7 +3475,7 @@ static unsigned long intel_alloc_iova(struct device *dev, return iova_pfn; } -static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) +static struct dmar_domain *get_valid_domain_for_dev(struct device *dev) { struct dmar_domain *domain, *tmp; struct dmar_rmrr_unit *rmrr; @@ -3522,18 +3522,6 @@ out: return domain; } -static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) -{ - struct device_domain_info *info; - - /* No lock here, assumes no domain exit in normal case */ - info = dev->archdata.iommu; - if (likely(info)) - return info->domain; - - return __get_valid_domain_for_dev(dev); -} - /* Check if the dev needs to go through non-identity map and unmap process.*/ static int iommu_no_mapping(struct device *dev) { From 71bb620df634b22a08efd62a0f93c3f2aceaa8e2 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 24 May 2017 16:31:23 +0200 Subject: [PATCH 031/436] iommu/vt-d: Constify irq_domain_ops struct irq_domain_ops is not modified, so it can be made const. Signed-off-by: Tobias Klauser Signed-off-by: Joerg Roedel --- drivers/iommu/intel_irq_remapping.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index a190cbd76ef7..f7ef4a5d4785 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS]; * the dmar_global_lock. */ static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); -static struct irq_domain_ops intel_ir_domain_ops; +static const struct irq_domain_ops intel_ir_domain_ops; static void iommu_disable_irq_remapping(struct intel_iommu *iommu); static int __init parse_ioapics_under_ir(void); @@ -1396,7 +1396,7 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain, modify_irte(&data->irq_2_iommu, &entry); } -static struct irq_domain_ops intel_ir_domain_ops = { +static const struct irq_domain_ops intel_ir_domain_ops = { .alloc = intel_irq_remapping_alloc, .free = intel_irq_remapping_free, .activate = intel_irq_remapping_activate, From 30bf2df65b0d18f53ee9c92131239f66c8f55d63 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 15 May 2017 16:25:03 +0200 Subject: [PATCH 032/436] iommu/amd: Ratelimit io-page-faults per device Misbehaving devices can cause an endless chain of io-page-faults, flooding dmesg and making the system-log unusable or even prevent the system from booting. So ratelimit the error messages about io-page-faults on a per-device basis. 
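For illustration only — a minimal, generic sketch of the per-device rate-limit pattern this change applies. The example_* names and the reporting function are invented; only struct ratelimit_state, ratelimit_default_init() and __ratelimit() are taken from the kernel's ratelimit API:

    #include <linux/ratelimit.h>
    #include <linux/device.h>

    struct example_dev_data {
            struct ratelimit_state rs;      /* one limiter per device */
    };

    static void example_init(struct example_dev_data *d)
    {
            /* kernel defaults: at most 10 messages per 5-second interval */
            ratelimit_default_init(&d->rs);
    }

    static void example_report_fault(struct device *dev,
                                     struct example_dev_data *d,
                                     unsigned long address)
    {
            /* __ratelimit() returns nonzero while this device's budget lasts */
            if (__ratelimit(&d->rs))
                    dev_err(dev, "I/O page fault at 0x%lx\n", address);
    }

The patch below additionally keeps the global printk_ratelimit() path as a fallback when no per-device data can be looked up for the faulting device ID.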
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 40 ++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 63cacf5d6cf2..d3ff766a8ca6 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -138,6 +138,8 @@ struct iommu_dev_data { PPR completions */ u32 errata; /* Bitmap for errata to apply */ bool use_vapic; /* Enable device to use vapic mode */ + + struct ratelimit_state rs; /* Ratelimit IOPF messages */ }; /* @@ -253,6 +255,8 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid) list_add_tail(&dev_data->dev_data_list, &dev_data_list); spin_unlock_irqrestore(&dev_data_list_lock, flags); + ratelimit_default_init(&dev_data->rs); + return dev_data; } @@ -551,6 +555,29 @@ static void dump_command(unsigned long phys_addr) pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); } +static void amd_iommu_report_page_fault(u16 devid, u16 domain_id, + u64 address, int flags) +{ + struct iommu_dev_data *dev_data = NULL; + struct pci_dev *pdev; + + pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff); + if (pdev) + dev_data = get_dev_data(&pdev->dev); + + if (dev_data && __ratelimit(&dev_data->rs)) { + dev_err(&pdev->dev, "AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n", + domain_id, address, flags); + } else if (printk_ratelimit()) { + pr_err("AMD-Vi: Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n", + PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + domain_id, address, flags); + } + + if (pdev) + pci_dev_put(pdev); +} + static void iommu_print_event(struct amd_iommu *iommu, void *__evt) { int type, devid, domid, flags; @@ -575,7 +602,12 @@ retry: goto retry; } - printk(KERN_ERR "AMD-Vi: Event logged ["); + if (type == EVENT_TYPE_IO_FAULT) { + amd_iommu_report_page_fault(devid, domid, address, flags); + return; + } else { + printk(KERN_ERR "AMD-Vi: Event logged ["); + } switch (type) { case EVENT_TYPE_ILL_DEV: @@ -585,12 +617,6 @@ retry: address, flags); dump_dte_entry(devid); break; - case EVENT_TYPE_IO_FAULT: - printk("IO_PAGE_FAULT device=%02x:%02x.%x " - "domain=0x%04x address=0x%016llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), - domid, address, flags); - break; case EVENT_TYPE_DEV_TAB_ERR: printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " "address=0x%016llx flags=0x%04x]\n", From e2f9d45fb452c60c9aaab55ce6f40601f9ec6516 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 24 May 2017 16:31:16 +0200 Subject: [PATCH 033/436] iommu/amd: Constify irq_domain_ops struct irq_domain_ops is not modified, so it can be made const. 
Signed-off-by: Tobias Klauser Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d3ff766a8ca6..d7748955184b 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4299,7 +4299,7 @@ static void irq_remapping_deactivate(struct irq_domain *domain, irte_info->index); } -static struct irq_domain_ops amd_ir_domain_ops = { +static const struct irq_domain_ops amd_ir_domain_ops = { .alloc = irq_remapping_alloc, .free = irq_remapping_free, .activate = irq_remapping_activate, From a91afc974ee8441940241e3c39c75d7b8f38e911 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 31 May 2017 15:58:19 +0800 Subject: [PATCH 034/436] dt-bindings: clock: sunxi-ccu: Add pll-periph to PRCM's needed clocks The AR100 clock in the PRCM has parents, one of which is pll-periph from the main CCU. Add it to the list of required clocks for the PRCM CCU. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- Documentation/devicetree/bindings/clock/sunxi-ccu.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index e9c5a1d9834a..f465647a4dd2 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt @@ -22,7 +22,8 @@ Required properties : - #clock-cells : must contain 1 - #reset-cells : must contain 1 -For the PRCM CCUs on H3/A64, one more clock is needed: +For the PRCM CCUs on H3/A64, two more clocks are needed: +- "pll-periph": the SoC's peripheral PLL from the main CCU - "iosc": the SoC's internal frequency oscillator Example for generic CCU: @@ -39,8 +40,8 @@ Example for PRCM CCU: r_ccu: clock@01f01400 { compatible = "allwinner,sun50i-a64-r-ccu"; reg = <0x01f01400 0x100>; - clocks = <&osc24M>, <&osc32k>, <&iosc>; - clock-names = "hosc", "losc", "iosc"; + clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>; + clock-names = "hosc", "losc", "iosc", "pll-periph"; #clock-cells = <1>; #reset-cells = <1>; }; From c4be8c68e6900b1811bc64f74cb13d5032a389ce Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 31 May 2017 15:58:21 +0800 Subject: [PATCH 035/436] clk: sunxi-ng: h3: Export PLL_PERIPH0 clock for the PRCM The PRCM takes PLL_PERIPH0 as one of its parents for the AR100 clock. As such we need to be able to describe this relationship in the device tree. Export the PLL_PERIPH0 clock so we can reference it in the PRCM node. 
Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun8i-h3.h | 4 +++- include/dt-bindings/clock/sun8i-h3-ccu.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h index 85973d1e8165..1b4baea37d81 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h @@ -29,7 +29,9 @@ #define CLK_PLL_VIDEO 6 #define CLK_PLL_VE 7 #define CLK_PLL_DDR 8 -#define CLK_PLL_PERIPH0 9 + +/* PLL_PERIPH0 exported for PRCM */ + #define CLK_PLL_PERIPH0_2X 10 #define CLK_PLL_GPU 11 #define CLK_PLL_PERIPH1 12 diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h index c2afc41d6964..e139fe5c62ec 100644 --- a/include/dt-bindings/clock/sun8i-h3-ccu.h +++ b/include/dt-bindings/clock/sun8i-h3-ccu.h @@ -43,6 +43,8 @@ #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ #define _DT_BINDINGS_CLK_SUN8I_H3_H_ +#define CLK_PLL_PERIPH0 9 + #define CLK_CPUX 14 #define CLK_BUS_CE 20 From d85da227c3ae43d9ca513d60f244213cb4e55485 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 31 May 2017 15:58:23 +0800 Subject: [PATCH 036/436] clk: sunxi-ng: a64: Export PLL_PERIPH0 clock for the PRCM The PRCM takes PLL_PERIPH0 as one of its parents for the AR100 clock. As such we need to be able to describe this relationship in the device tree. Export the PLL_PERIPH0 clock so we can reference it in the PRCM node. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/ccu-sun50i-a64.h | 4 +++- include/dt-bindings/clock/sun50i-a64-ccu.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h index 9b3cd24b78d2..061b6fbb4f95 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h @@ -31,7 +31,9 @@ #define CLK_PLL_VIDEO0_2X 8 #define CLK_PLL_VE 9 #define CLK_PLL_DDR0 10 -#define CLK_PLL_PERIPH0 11 + +/* PLL_PERIPH0 exported for PRCM */ + #define CLK_PLL_PERIPH0_2X 12 #define CLK_PLL_PERIPH1 13 #define CLK_PLL_PERIPH1_2X 14 diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h index 370c0a0473fc..d66432c6e675 100644 --- a/include/dt-bindings/clock/sun50i-a64-ccu.h +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h @@ -43,6 +43,8 @@ #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ #define _DT_BINDINGS_CLK_SUN50I_A64_H_ +#define CLK_PLL_PERIPH0 11 + #define CLK_BUS_MIPI_DSI 28 #define CLK_BUS_CE 29 #define CLK_BUS_DMA 30 From e385050873d1e19e40481d8cd868c9f60ebe46ac Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Wed, 24 May 2017 10:26:29 +0200 Subject: [PATCH 037/436] s390/zcrypt: Fix blocking queue device after unbind/bind. When the association between a queue device and the driver is released via unbind and later re-associated the queue device was not operational any more. Reason was a wrong administration of the card/queue lists within the ap device driver. This patch introduces revised card/queue list handling within the ap device driver: when an ap device is detected it is initial not added to the card/queue list any more. With driver probe the card device is added to the card list/the queue device is added to the queue list within a card. With driver remove the device is removed from the card/queue list. 
Additionally there are some situations within the ap device live where the lists need update upon card/queue device release (for example device hot unplug or suspend/resume). Signed-off-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/ap_bus.c | 38 +++++++++++++++++++++------------- drivers/s390/crypto/ap_card.c | 9 +++++++- drivers/s390/crypto/ap_queue.c | 9 +++++++- 3 files changed, 40 insertions(+), 16 deletions(-) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 9be4596d8a08..ea099910b4e9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev) struct ap_driver *ap_drv = to_ap_drv(dev->driver); int rc; + /* Add queue/card to list of active queues/cards */ + spin_lock_bh(&ap_list_lock); + if (is_card_dev(dev)) + list_add(&to_ap_card(dev)->list, &ap_card_list); + else + list_add(&to_ap_queue(dev)->list, + &to_ap_queue(dev)->card->queues); + spin_unlock_bh(&ap_list_lock); + ap_dev->drv = ap_drv; rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; - if (rc) + + if (rc) { + spin_lock_bh(&ap_list_lock); + if (is_card_dev(dev)) + list_del_init(&to_ap_card(dev)->list); + else + list_del_init(&to_ap_queue(dev)->list); + spin_unlock_bh(&ap_list_lock); ap_dev->drv = NULL; + } + return rc; } @@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev) struct ap_device *ap_dev = to_ap_dev(dev); struct ap_driver *ap_drv = ap_dev->drv; + if (ap_drv->remove) + ap_drv->remove(ap_dev); + + /* Remove queue/card from list of active queues/cards */ spin_lock_bh(&ap_list_lock); if (is_card_dev(dev)) list_del_init(&to_ap_card(dev)->list); else list_del_init(&to_ap_queue(dev)->list); spin_unlock_bh(&ap_list_lock); - if (ap_drv->remove) - ap_drv->remove(ap_dev); + return 0; } @@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused) } /* get it and thus adjust reference counter */ get_device(&ac->ap_dev.device); - /* Add card device to card list */ - spin_lock_bh(&ap_list_lock); - list_add(&ac->list, &ap_card_list); - spin_unlock_bh(&ap_list_lock); } /* now create the new queue device */ aq = ap_queue_create(qid, type); @@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused) aq->ap_dev.device.parent = &ac->ap_dev.device; dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom); - /* Add queue device to card queue list */ - spin_lock_bh(&ap_list_lock); - list_add(&aq->list, &ac->queues); - spin_unlock_bh(&ap_list_lock); /* Start with a device reset */ spin_lock_bh(&aq->lock); ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); @@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused) /* Register device */ rc = device_register(&aq->ap_dev.device); if (rc) { - spin_lock_bh(&ap_list_lock); - list_del_init(&aq->list); - spin_unlock_bh(&ap_list_lock); put_device(&aq->ap_dev.device); continue; } diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index cfa161ccc74e..836efac96813 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -160,7 +160,14 @@ static struct device_type ap_card_type = { static void ap_card_device_release(struct device *dev) { - kfree(to_ap_card(dev)); + struct ap_card *ac = to_ap_card(dev); + + if (!list_empty(&ac->list)) { + spin_lock_bh(&ap_list_lock); + list_del_init(&ac->list); + spin_unlock_bh(&ap_list_lock); + } + kfree(ac); } struct ap_card *ap_card_create(int id, int queue_depth, int device_type, diff --git 
a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 480c58a63769..0f1a5d02acb0 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -584,7 +584,14 @@ static struct device_type ap_queue_type = { static void ap_queue_device_release(struct device *dev) { - kfree(to_ap_queue(dev)); + struct ap_queue *aq = to_ap_queue(dev); + + if (!list_empty(&aq->list)) { + spin_lock_bh(&ap_list_lock); + list_del_init(&aq->list); + spin_unlock_bh(&ap_list_lock); + } + kfree(aq); } struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) From 77125a701adb21bfc03a2af211f472f8b490a084 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 31 May 2017 15:58:22 +0800 Subject: [PATCH 038/436] ARM: sunxi: h3-h5: Add PLL_PERIPH0 clock to the R_CCU The AR100 clock within the R_CCU (PRCM) has the PLL_PERIPH0 as one of its parents. This adds the reference in the device tree describing this relationship. This patch uses a raw number for the clock index to ease merging by avoiding cross tree dependencies. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- arch/arm/boot/dts/sunxi-h3-h5.dtsi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index d0067fec99de..d4f600dbb7eb 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -560,8 +560,9 @@ r_ccu: clock@1f01400 { compatible = "allwinner,sun8i-h3-r-ccu"; reg = <0x01f01400 0x100>; - clocks = <&osc24M>, <&osc32k>, <&iosc>; - clock-names = "hosc", "losc", "iosc"; + clocks = <&osc24M>, <&osc32k>, <&iosc>, + <&ccu 9>; + clock-names = "hosc", "losc", "iosc", "pll-periph"; #clock-cells = <1>; #reset-cells = <1>; }; From f74994a94063bc85ac1d6ad677ed06b5279c101f Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 31 May 2017 15:58:24 +0800 Subject: [PATCH 039/436] arm64: allwinner: a64: Add PLL_PERIPH0 clock to the R_CCU The AR100 clock within the R_CCU (PRCM) has the PLL_PERIPH0 as one of its parents. This adds the reference in the device tree describing this relationship. This patch uses a raw number for the clock index to ease merging by avoiding cross tree dependencies. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard --- arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index c7f669f5884f..166c9ef884dc 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -406,8 +406,9 @@ r_ccu: clock@1f01400 { compatible = "allwinner,sun50i-a64-r-ccu"; reg = <0x01f01400 0x100>; - clocks = <&osc24M>, <&osc32k>, <&iosc>; - clock-names = "hosc", "losc", "iosc"; + clocks = <&osc24M>, <&osc32k>, <&iosc>, + <&ccu 11>; + clock-names = "hosc", "losc", "iosc", "pll-periph"; #clock-cells = <1>; #reset-cells = <1>; }; From 948588e25b8af5e66962ed3f53e1cae1656fa5af Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Mon, 29 May 2017 09:59:40 +0000 Subject: [PATCH 040/436] iio: imu: inv_mpu6050: add accel lpf setting for chip >= MPU6500 Starting from MPU6500, accelerometer dlpf is set in a separate register named ACCEL_CONFIG_2. Add this new register in the map and set it for the corresponding chips. 
Signed-off-by: Jean-Baptiste Maneyrol Cc: stable@vger.kernel.org Signed-off-by: Jonathan Cameron --- drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 39 ++++++++++++++++++++-- drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 3 ++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 96dabbd2f004..88a7c5d4e4d2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785}; static const struct inv_mpu6050_reg_map reg_set_6500 = { .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, .lpf = INV_MPU6050_REG_CONFIG, + .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, .user_ctrl = INV_MPU6050_REG_USER_CTRL, .fifo_en = INV_MPU6050_REG_FIFO_EN, .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, @@ -210,6 +211,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on) } EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); +/** + * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent + * + * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope + * MPU6500 and above have a dedicated register for accelerometer + */ +static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, + enum inv_mpu6050_filter_e val) +{ + int result; + + result = regmap_write(st->map, st->reg->lpf, val); + if (result) + return result; + + switch (st->chip_type) { + case INV_MPU6050: + case INV_MPU6000: + case INV_MPU9150: + /* old chips, nothing to do */ + result = 0; + break; + default: + /* set accel lpf */ + result = regmap_write(st->map, st->reg->accel_lpf, val); + break; + } + + return result; +} + /** * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. * @@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) if (result) return result; - d = INV_MPU6050_FILTER_20HZ; - result = regmap_write(st->map, st->reg->lpf, d); + result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); if (result) return result; @@ -537,6 +568,8 @@ error_write_raw: * would be alising. This function basically search for the * correct low pass parameters based on the fifo rate, e.g, * sampling frequency. + * + * lpf is set automatically when setting sampling rate to avoid any aliases. */ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) { @@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) i++; data = d[i]; - result = regmap_write(st->map, st->reg->lpf, data); + result = inv_mpu6050_set_lpf_regs(st, data); if (result) return result; st->chip_config.lpf = data; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index ef13de7a2c20..953a0c09d568 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -28,6 +28,7 @@ * struct inv_mpu6050_reg_map - Notable registers. * @sample_rate_div: Divider applied to gyro output rate. * @lpf: Configures internal low pass filter. + * @accel_lpf: Configures accelerometer low pass filter. * @user_ctrl: Enables/resets the FIFO. * @fifo_en: Determines which data will appear in FIFO. * @gyro_config: gyro config register. 
@@ -47,6 +48,7 @@ struct inv_mpu6050_reg_map { u8 sample_rate_div; u8 lpf; + u8 accel_lpf; u8 user_ctrl; u8 fifo_en; u8 gyro_config; @@ -188,6 +190,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_FIFO_THRESHOLD 500 /* mpu6500 registers */ +#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 /* delay time in milliseconds */ From 1454e15bc28ba94aa5d6b31a83a42d5c03af2a6d Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 16 May 2017 15:26:12 +0000 Subject: [PATCH 041/436] iio: adc: mxs-lradc: Fix return value check in mxs_lradc_adc_probe() In case of error, the function devm_ioremap() returns NULL pointer not ERR_PTR(). The IS_ERR() test in the return value check should be replaced with NULL test. Also add NULL test for iores. Signed-off-by: Wei Yongjun Signed-off-by: Jonathan Cameron --- drivers/iio/adc/mxs-lradc-adc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index b0c7d8ee5cb8..6888167ca1e6 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c @@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) adc->dev = dev; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iores) + return -EINVAL; + adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); - if (IS_ERR(adc->base)) - return PTR_ERR(adc->base); + if (!adc->base) + return -ENOMEM; init_completion(&adc->completion); spin_lock_init(&adc->lock); From f4781e76f90df7aec400635d73ea4c35ee1d4765 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 30 May 2017 23:15:34 +0200 Subject: [PATCH 042/436] alarmtimer: Prevent overflow of relative timers Andrey reported a alartimer related RCU stall while fuzzing the kernel with syzkaller. The reason for this is an overflow in ktime_add() which brings the resulting time into negative space and causes immediate expiry of the timer. The following rearm with a small interval does not bring the timer back into positive space due to the same issue. This results in a permanent firing alarmtimer which hogs the CPU. Use ktime_add_safe() instead which detects the overflow and clamps the result to KTIME_SEC_MAX. 
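As background, a minimal sketch of the saturating-add idea — not the kernel's actual ktime_add_safe() implementation, and assuming non-negative operands as in these timer paths: if the signed 64-bit addition wraps, clamp to KTIME_SEC_MAX instead of handing the timer code a negative expiry.

    #include <linux/ktime.h>

    static ktime_t example_ktime_add_saturating(ktime_t lhs, ktime_t rhs)
    {
            ktime_t res = lhs + rhs;  /* ktime_t is a signed 64-bit nanosecond count */

            /* the sum wrapped into negative space: saturate instead of firing early */
            if (res < 0 || res < lhs || res < rhs)
                    res = ktime_set(KTIME_SEC_MAX, 0);

            return res;
    }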
Reported-by: Andrey Konovalov Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Kostya Serebryany Cc: syzkaller Cc: John Stultz Cc: Dmitry Vyukov Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20170530211655.802921648@linutronix.de --- kernel/time/alarmtimer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 5cb5b0008d97..2b2e032b4eb4 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -387,7 +387,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start) { struct alarm_base *base = &alarm_bases[alarm->type]; - start = ktime_add(start, base->gettime()); + start = ktime_add_safe(start, base->gettime()); alarm_start(alarm, start); } EXPORT_SYMBOL_GPL(alarm_start_relative); @@ -475,7 +475,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) overrun++; } - alarm->node.expires = ktime_add(alarm->node.expires, interval); + alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); return overrun; } EXPORT_SYMBOL_GPL(alarm_forward); @@ -666,7 +666,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, ktime_t now; now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); - exp = ktime_add(now, exp); + exp = ktime_add_safe(now, exp); } alarm_start(&timr->it.alarm.alarmtimer, exp); From ff86bf0c65f14346bf2440534f9ba5ac232c39a0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 30 May 2017 23:15:35 +0200 Subject: [PATCH 043/436] alarmtimer: Rate limit periodic intervals The alarmtimer code has another source of potentially rearming itself too fast. Interval timers with a very small interval have a similar CPU hog effect as the previously fixed overflow issue. The reason is that alarmtimers do not implement the normal protection against this kind of problem which the other posix timers use: timer expires -> queue signal -> deliver signal -> rearm timer This scheme brings the rearming under scheduler control and prevents permanently firing timers which hog the CPU. Bringing this scheme to the alarm timer code is a major overhaul because it lacks all the necessary mechanisms completely. So for a quick fix, limit the interval to one jiffie. This is not problematic in practice as alarmtimers are usually backed by an RTC for suspend, which has 1 second resolution. It could therefore be argued that the resolution of this clock should be set to 1 second in general, but that's outside the scope of this fix. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Kostya Serebryany Cc: syzkaller Cc: John Stultz Cc: Dmitry Vyukov Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20170530211655.896767100@linutronix.de --- kernel/time/alarmtimer.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 2b2e032b4eb4..ee2f4202d82a 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -660,6 +660,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, /* start the timer */ timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); + + /* + * Rate limit to the tick as a hot fix to prevent DOS. Will be + * mopped up later.
+ */ + if (timr->it.alarm.interval < TICK_NSEC) + timr->it.alarm.interval = TICK_NSEC; + exp = timespec64_to_ktime(new_setting->it_value); /* Convert (if necessary) to absolute time */ if (flags != TIMER_ABSTIME) { From 3def03441e53e29eed3afd9009974a5a42bf124a Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Sat, 20 May 2017 13:27:00 +0200 Subject: [PATCH 044/436] genksyms: add printf format attribute to error_with_pos() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When compiling with -Wsuggest-attribute=format in HOSTCFLAGS, gcc complains that error_with_pos() may be declared with a printf format attribute: scripts/genksyms/genksyms.c:726:3: warning: function might be possible candidate for ‘gnu_printf’ format attribute [-Wsuggest-attribute=format] vfprintf(stderr, fmt, args); ^~~~~~~~ This would allow catching printf-format errors at compile time in callers to error_with_pos(). Add this attribute. Signed-off-by: Nicolas Iooss Signed-off-by: Masahiro Yamada --- scripts/genksyms/genksyms.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h index 3bffdcaaa274..b724a0290c75 100644 --- a/scripts/genksyms/genksyms.h +++ b/scripts/genksyms/genksyms.h @@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start, int yylex(void); int yyparse(void); -void error_with_pos(const char *, ...); +void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2))); /*----------------------------------------------------------------------*/ #define xmalloc(size) ({ void *__ptr = malloc(size); \ From 6bf28969f617f0131e94deb5476f4c45f43fd847 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 5 Jun 2017 11:25:07 -0700 Subject: [PATCH 045/436] xtensa: reduce double exception literal reservation Double exception vector only needs 20 bytes of space for 5 literals, not 48. Reduce the reservation for double exception vector literals accordingly. This fixes build for configurations with small user exception vector size. Signed-off-by: Max Filippov --- arch/xtensa/kernel/vmlinux.lds.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 30d9fc21e076..162c77e53ca8 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -118,7 +118,7 @@ SECTIONS SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) - SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) + SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) #endif @@ -306,13 +306,13 @@ SECTIONS .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 48, + DOUBLEEXC_VECTOR_VADDR - 20, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 48, + 20, .DoubleExceptionVector.literal) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; From 186f0a0d8e083505bd5cd23baa82b2205224d9ad Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 5 Jun 2017 23:33:39 +0200 Subject: [PATCH 046/436] Revert "ACPICA: Disassembler: Enhance resource descriptor detection" Revert commit da28e1955d7f (ACPICA: Disassembler: Enhance resource descriptor detection) as it is based on an assumption that doesn't hold all the time and causes problems to happen because of that. Reported-by: Linda Knippers Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/utresrc.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index e0587c85bafd..ff096d9755b9 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); } - /* - * The end_tag opcode must be followed by a zero byte. - * Although this byte is technically defined to be a checksum, - * in practice, all ASL compilers set this byte to zero. - */ - if (*(aml + 1) != 0) { - return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); - } - /* Return the pointer to the end_tag if requested */ if (!user_function) { From cbf52a3e6a8a92beec6e0c70abf4111cd8f8faf7 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Mon, 5 Jun 2017 13:59:15 +0200 Subject: [PATCH 047/436] tags: honor COMPILED_SOURCE with apart output directory When the kernel is compiled with an "O=" argument, the object files are not in the source tree, but in the build tree. This patch fixes O= build by looking for object files in the build tree. Fixes: 923e02ecf3f8 ("scripts/tags.sh: Support compiled source") Signed-off-by: Robert Jarzmik Signed-off-by: Masahiro Yamada --- scripts/tags.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/tags.sh b/scripts/tags.sh index d661f2f3ef61..d23dcbf17457 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -106,6 +106,7 @@ all_compiled_sources() case "$i" in *.[cS]) j=${i/\.[cS]/\.o} + j="${j#$tree}" if [ -e $j ]; then echo $i fi From e5c86679d5e864947a52fb31e45a425dea3e7fa9 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 5 Jun 2017 02:43:51 -0700 Subject: [PATCH 048/436] xtensa: don't use linux IRQ #0 Linux IRQ #0 is reserved for error reporting and may not be used. Increase NR_IRQS for one additional slot and increase irq_domain_add_legacy parameter first_irq value to 1, so that linux IRQ #0 is not associated with hardware IRQ #0 in legacy IRQ domains. Introduce macro XTENSA_PIC_LINUX_IRQ for static translation of xtensa PIC hardware IRQ # to linux IRQ #. Use this macro in XTFPGA platform data definitions. This fixes inability to use hardware IRQ #0 in configurations that don't use device tree and allows for non-identity mapping between linux IRQ # and hardware IRQ #. 
Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/include/asm/irq.h | 3 ++- arch/xtensa/kernel/irq.c | 5 ----- .../platforms/xtfpga/include/platform/hardware.h | 6 ++++-- arch/xtensa/platforms/xtfpga/setup.c | 10 +++++----- drivers/irqchip/irq-xtensa-mx.c | 2 +- drivers/irqchip/irq-xtensa-pic.c | 2 +- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index f71f88ea7646..19707db966f1 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h @@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { } # define PLATFORM_NR_IRQS 0 #endif #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS -#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) +#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) +#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) #if VARIANT_NR_IRQS == 0 static inline void variant_init_irq(void) { } diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a265edd6ac37..99341028cc77 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { int irq = irq_find_mapping(NULL, hwirq); - if (hwirq >= NR_IRQS) { - printk(KERN_EMERG "%s: cannot handle IRQ %d\n", - __func__, hwirq); - } - #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index dbeea2b440a1..1fda7e20dfcb 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h @@ -24,16 +24,18 @@ /* Interrupt configuration. */ -#define PLATFORM_NR_IRQS 10 +#define PLATFORM_NR_IRQS 0 /* Default assignment of LX60 devices to external interrupts. */ #ifdef CONFIG_XTENSA_MX #define DUART16552_INTNUM XCHAL_EXTINT3_NUM #define OETH_IRQ XCHAL_EXTINT4_NUM +#define C67X00_IRQ XCHAL_EXTINT8_NUM #else #define DUART16552_INTNUM XCHAL_EXTINT0_NUM #define OETH_IRQ XCHAL_EXTINT1_NUM +#define C67X00_IRQ XCHAL_EXTINT5_NUM #endif /* @@ -63,5 +65,5 @@ #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) #define C67X00_SIZE 0x10 -#define C67X00_IRQ 5 + #endif /* __XTENSA_XTAVNET_HARDWARE_H */ diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 779be723eb2b..42285f35d313 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -175,8 +175,8 @@ static struct resource ethoc_res[] = { .flags = IORESOURCE_MEM, }, [2] = { /* IRQ number */ - .start = OETH_IRQ, - .end = OETH_IRQ, + .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), + .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), .flags = IORESOURCE_IRQ, }, }; @@ -213,8 +213,8 @@ static struct resource c67x00_res[] = { .flags = IORESOURCE_MEM, }, [1] = { /* IRQ number */ - .start = C67X00_IRQ, - .end = C67X00_IRQ, + .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), + .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), .flags = IORESOURCE_IRQ, }, }; @@ -247,7 +247,7 @@ static struct resource serial_resource = { static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = DUART16552_PADDR, - .irq = DUART16552_INTNUM, + .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = XCHAL_HAVE_BE ? 
UPIO_MEM32BE : UPIO_MEM32, diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index bb3ac5fe5846..72a391e01011 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); irq_set_default_host(root_domain); diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 472ae1770964..f728755fa292 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_irq_domain_ops, &xtensa_irq_chip); irq_set_default_host(root_domain); return 0; From 27fef9f8ecb0495d302deba210606a32e54db37a Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Tue, 6 Jun 2017 09:46:33 +0100 Subject: [PATCH 049/436] mfd: arizona: Fix typo using hard-coded register A hardcoded register is accidentally used instead of the register address passed into the function. Correct this and use the appropriate variable. This would cause minor issues on wm5102, but all other devices using this driver would have been unaffected. Fixes: commit ef84f885e037 ("mfd: arizona: Refactor arizona_poll_reg") Reported-by: Andrzej Hajda Signed-off-by: Charles Keepax Signed-off-by: Lee Jones --- drivers/mfd/arizona-core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 75488e65cd96..8d46e3ad9529 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona, int ret; ret = regmap_read_poll_timeout(arizona->regmap, - ARIZONA_INTERRUPT_RAW_STATUS_5, val, - ((val & mask) == target), + reg, val, ((val & mask) == target), ARIZONA_REG_POLL_DELAY_US, timeout_ms * 1000); if (ret) From 4b0755e90ae03ba40174842af6fa810355960fbc Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 4 May 2017 12:20:17 -0300 Subject: [PATCH 050/436] [media] tc358743: fix register i2c_rd/wr function fix The below mentioned fix contains a small but severe bug, fix it to make the driver work again. 
Fixes: 3538aa6ecfb2 ("[media] tc358743: fix register i2c_rd/wr functions") Cc: Hans Verkuil Cc: Mauro Carvalho Chehab Signed-off-by: Philipp Zabel Acked-by: Arnd Bergmann Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/i2c/tc358743.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index acef4eca269f..3251cba89e8f 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val) static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, u8 mask, u8 val) { - i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); + i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); } static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) From e0f2e5eb14668dc798942ff5f4241d9bd17f9655 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 25 Apr 2017 00:12:46 -0300 Subject: [PATCH 051/436] [media] rainshadow-cec: Fix missing spin_lock_init() The driver allocates the spinlock but not initialize it. Use spin_lock_init() on it to initialize it correctly. This is detected by Coccinelle semantic patch. Fixes: 0f314f6c2e77 ("[media] rainshadow-cec: new RainShadow Tech HDMI CEC driver") Signed-off-by: Wei Yongjun Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/usb/rainshadow-cec/rainshadow-cec.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index 71bd68548c9c..4126552c9055 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c @@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv) serio_set_drvdata(serio, rain); INIT_WORK(&rain->work, rain_irq_work_handler); mutex_init(&rain->write_lock); + spin_lock_init(&rain->buf_lock); err = serio_open(serio, drv); if (err) From 5ebb6dd36c9f5fb37b1077b393c254d70a14cb46 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 28 Apr 2017 01:51:40 -0300 Subject: [PATCH 052/436] [media] vb2: Fix an off by one error in 'vb2_plane_vaddr' We should ensure that 'plane_no' is '< vb->num_planes' as done in 'vb2_plane_cookie' just a few lines below. 
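To spell the off-by-one out with a generic sketch (not vb2 code; names are invented): for a zero-based array of num_planes entries, index num_planes is already out of range, so only a '<' / '>=' style check rejects every invalid index.

    #include <linux/types.h>

    /* valid indexes into a num_planes-sized, zero-based array are 0 .. num_planes - 1 */
    static bool example_plane_index_valid(unsigned int plane_no,
                                          unsigned int num_planes)
    {
            /* "plane_no > num_planes" would wrongly accept plane_no == num_planes */
            return plane_no < num_planes;
    }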
Fixes: e23ccc0ad925 ("[media] v4l: add videobuf2 Video for Linux 2 driver framework") Cc: stable@vger.kernel.org Signed-off-by: Christophe JAILLET Reviewed-by: Sakari Ailus Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/v4l2-core/videobuf2-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 94afbbf92807..c0175ea7e7ad 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs); void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) { - if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); From 2302e5591a4ad2d66107c75b6be170121bff5ccd Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 21 Apr 2017 07:52:17 -0300 Subject: [PATCH 053/436] [media] cec: improve MEDIA_CEC_RC dependencies Changing the IS_REACHABLE() into a plain #ifdef broke the case of CONFIG_MEDIA_RC=m && CONFIG_MEDIA_CEC=y: drivers/media/cec/cec-core.o: In function `cec_unregister_adapter': cec-core.c:(.text.cec_unregister_adapter+0x18): undefined reference to `rc_unregister_device' drivers/media/cec/cec-core.o: In function `cec_delete_adapter': cec-core.c:(.text.cec_delete_adapter+0x54): undefined reference to `rc_free_device' drivers/media/cec/cec-core.o: In function `cec_register_adapter': cec-core.c:(.text.cec_register_adapter+0x94): undefined reference to `rc_register_device' cec-core.c:(.text.cec_register_adapter+0xa4): undefined reference to `rc_free_device' cec-core.c:(.text.cec_register_adapter+0x110): undefined reference to `rc_unregister_device' drivers/media/cec/cec-core.o: In function `cec_allocate_adapter': cec-core.c:(.text.cec_allocate_adapter+0x234): undefined reference to `rc_allocate_device' drivers/media/cec/cec-adap.o: In function `cec_received_msg': cec-adap.c:(.text.cec_received_msg+0x734): undefined reference to `rc_keydown' cec-adap.c:(.text.cec_received_msg+0x768): undefined reference to `rc_keyup' This adds an additional dependency to explicitly forbid this combination. Fixes: 5f2c467c54f5 ("[media] cec: add MEDIA_CEC_RC config option") Signed-off-by: Arnd Bergmann Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/cec/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig index 4e25a950ae6f..43428cec3a01 100644 --- a/drivers/media/cec/Kconfig +++ b/drivers/media/cec/Kconfig @@ -1,5 +1,6 @@ config MEDIA_CEC_RC bool "HDMI CEC RC integration" depends on CEC_CORE && RC_CORE + depends on CEC_CORE=m || RC_CORE=y ---help--- Pass on CEC remote control messages to the RC framework. 
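For the record — and assuming only these two symbols are relevant — the tristate combinations left for the MEDIA_CEC_RC entry above after the added dependency are:

    CEC_CORE=y  RC_CORE=y   -> allowed (rc_* symbols are built in)
    CEC_CORE=m  RC_CORE=y   -> allowed
    CEC_CORE=m  RC_CORE=m   -> allowed (both modular, symbols reachable)
    CEC_CORE=y  RC_CORE=m   -> now forbidden

The last row is exactly the built-in CEC core calling into a modular RC core, which is the combination that produced the undefined references quoted above.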
From ae8eb443a17331a07579bc04817accaaaa62b78e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 12 May 2017 16:39:21 -0300 Subject: [PATCH 054/436] [media] cec-notifier.h: handle unreachable CONFIG_CEC_CORE Fix a link error in this specific combination of config options: CONFIG_MEDIA_CEC_SUPPORT=y CONFIG_CEC_CORE=m CONFIG_MEDIA_CEC_NOTIFIER=y CONFIG_VIDEO_STI_HDMI_CEC=m CONFIG_DRM_STI=y drivers/gpu/drm/sti/sti_hdmi.o: In function `sti_hdmi_remove': sti_hdmi.c:(.text.sti_hdmi_remove+0x10): undefined reference to `cec_notifier_set_phys_addr' sti_hdmi.c:(.text.sti_hdmi_remove+0x34): undefined reference to `cec_notifier_put' drivers/gpu/drm/sti/sti_hdmi.o: In function `sti_hdmi_connector_get_modes': sti_hdmi.c:(.text.sti_hdmi_connector_get_modes+0x4a): undefined reference to `cec_notifier_set_phys_addr_from_edid' drivers/gpu/drm/sti/sti_hdmi.o: In function `sti_hdmi_probe': sti_hdmi.c:(.text.sti_hdmi_probe+0x204): undefined reference to `cec_notifier_get' drivers/gpu/drm/sti/sti_hdmi.o: In function `sti_hdmi_connector_detect': sti_hdmi.c:(.text.sti_hdmi_connector_detect+0x36): undefined reference to `cec_notifier_set_phys_addr' drivers/gpu/drm/sti/sti_hdmi.o: In function `sti_hdmi_disable': sti_hdmi.c:(.text.sti_hdmi_disable+0xc0): undefined reference to `cec_notifier_set_phys_addr' The version below seems to work, though I don't particularly like the IS_REACHABLE() addition since that can be confusing to users. Signed-off-by: Arnd Bergmann Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/cec-notifier.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index 413335c8cb52..298f996969df 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h @@ -106,6 +106,16 @@ static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, { } +static inline void cec_notifier_register(struct cec_notifier *n, + struct cec_adapter *adap, + void (*callback)(struct cec_adapter *adap, u16 pa)) +{ +} + +static inline void cec_notifier_unregister(struct cec_notifier *n) +{ +} + #endif #endif From f8c627fbabbe9ed6ae68dcfefb7519bd153a7ac0 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Tue, 16 May 2017 04:56:14 -0300 Subject: [PATCH 055/436] [media] sir_ir: infinite loop in interrupt handler Since this driver does no detection of hardware, it might be used with a non-sir port. Escape out if we are spinning. 
Reported-by: kbuild test robot Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab --- drivers/media/rc/sir_ir.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index e12ec50bf0bf..90a5f8fd5eea 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c @@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id) static unsigned long delt; unsigned long deltintr; unsigned long flags; + int counter = 0; int iir, lsr; while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { + if (++counter > 256) { + dev_err(&sir_ir_dev->dev, "Trapped in interrupt"); + break; + } + switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */ case UART_IIR_MSI: (void)inb(io + UART_MSR); From 0e3e97526a850f97c9fe8b646937b3a2bef58290 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Tue, 6 Jun 2017 14:47:29 +0300 Subject: [PATCH 056/436] usb: gadget: composite: make sure to reactivate function on unbind If a function sets bind_deactivated flag, upon removal we will be left with an unbalanced deactivation. Let's make sure that we conditionally call usb_function_activate() from usb_remove_function() and make sure usb_remove_function() is called from remove_config(). Signed-off-by: Felipe Balbi --- drivers/usb/gadget/composite.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 49d685ad0da9..45b554032332 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f) list_del(&f->list); if (f->unbind) f->unbind(c, f); + + if (f->bind_deactivated) + usb_function_activate(f); } EXPORT_SYMBOL_GPL(usb_remove_function); @@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev, f = list_first_entry(&config->functions, struct usb_function, list); - list_del(&f->list); - if (f->unbind) { - DBG(cdev, "unbind function '%s'/%p\n", f->name, f); - f->unbind(config, f); - /* may free memory for "f" */ - } + + usb_remove_function(config, f); } list_del(&config->list); if (config->unbind) { From 3db28271f0feae129262d30e41384a7c4c767987 Mon Sep 17 00:00:00 2001 From: Sebastian Parschauer Date: Tue, 6 Jun 2017 13:53:13 +0200 Subject: [PATCH 057/436] HID: Add quirk for Dell PIXART OEM mouse This mouse is also known under other IDs. It needs the quirk ALWAYS_POLL or will disconnect in runlevel 1 or 3. 
Signed-off-by: Sebastian Parschauer CC: stable@vger.kernel.org Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 3 +++ drivers/hid/usbhid/hid-quirks.c | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 8ca1e8ce0af2..4f9a3938189a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -319,6 +319,9 @@ #define USB_VENDOR_ID_DELCOM 0x0fc5 #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 +#define USB_VENDOR_ID_DELL 0x413c +#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a + #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 6316498b7812..a88e7c7bea0a 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -85,6 +85,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, From 9ba26a7283f56100eb08a2df48f17da600f60d52 Mon Sep 17 00:00:00 2001 From: Cao jin Date: Tue, 6 Jun 2017 17:07:53 +0800 Subject: [PATCH 058/436] Kbuild: tiny correction on `make help` The help info of `make C=1` is little confusing, make it clear. Signed-off-by: Cao jin Signed-off-by: Masahiro Yamada --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 63e10bd4f14a..1e2ae78f8ba5 100644 --- a/Makefile +++ b/Makefile @@ -1437,7 +1437,7 @@ help: @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' @echo ' make V=2 [targets] 2 => give reason for rebuild of target' @echo ' make O=dir [targets] Locate all output files in "dir", including .config' - @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' + @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' From c28294b941232931fbd714099798eb7aa7e865d7 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 6 Jun 2017 15:56:54 +0200 Subject: [PATCH 059/436] net: don't call strlen on non-terminated string in dev_set_alias() KMSAN reported a use of uninitialized memory in dev_set_alias(), which was caused by calling strlcpy() (which in turn called strlen()) on the user-supplied non-terminated string. Signed-off-by: Alexander Potapenko Signed-off-by: David S. 
Miller --- net/core/dev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index fca407b4a6ea..84e1e86a4bce 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) if (!new_ifalias) return -ENOMEM; dev->ifalias = new_ifalias; + memcpy(dev->ifalias, alias, len); + dev->ifalias[len] = 0; - strlcpy(dev->ifalias, alias, len+1); return len; } From 996652c7050c70008e4434af108be6f15f20fbd0 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Tue, 6 Jun 2017 16:30:31 +0200 Subject: [PATCH 060/436] bnx2x: fix pf2vf bulletin DMA mapping leak When freeing VF's DMA mappings, an already NULLed pointer was checked again due to an apparent copy&paste error. Consequently, the pf2vf bulletin DMA mapping was not freed. Signed-off-by: Michal Schmidt Acked-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index bdfd53b46bc5..870ea001a720 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3042,7 +3042,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp) { BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, sizeof(struct bnx2x_vf_mbx_msg)); - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, + BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, sizeof(union pf_vf_bulletin)); } From e4061d572cef1f990bb7761f45d00342daa27fbd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jun 2017 19:01:37 +0300 Subject: [PATCH 061/436] net: fix up hash documentation commit 61b905da33 ("net: Rename skb->rxhash to skb->hash") didn't update the documentation, fix this up. Cc: Tom Herbert Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- Documentation/networking/scaling.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 59f4db2a0c85..f55639d71d35 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware or will be computed in the stack. Capable hardware can pass the hash in the receive descriptor for the packet; this would usually be the same hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in -skb->rx_hash and can be used elsewhere in the stack as a hash of the +skb->hash and can be used elsewhere in the stack as a hash of the packet’s flow. Each receive hardware queue has an associated list of CPUs to which From 79e25959403e6a79552db28a87abed34de32a1df Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 6 Jun 2017 09:22:00 -0700 Subject: [PATCH 062/436] IB/addr: Fix setting source address in addr6_resolve() Commit eea40b8f624f ("infiniband: call ipv6 route lookup via the stub interface") introduced a regression in address resolution when connecting to IPv6 destination addresses. The old code called ip6_route_output(), while the new code calls ipv6_stub->ipv6_dst_lookup(). The two are almost the same, except that ipv6_dst_lookup() also calls ip6_route_get_saddr() if the source address is in6addr_any. 
This means that the test of ipv6_addr_any(&fl6.saddr) now never succeeds, and so we never copy the source address out. This ends up causing rdma_resolve_addr() to fail, because without a resolved source address, cma_acquire_dev() will fail to find an RDMA device to use. For me, this causes connecting to an NVMe over Fabrics target via RoCE / IPv6 to fail. Fix this by copying out fl6.saddr if ipv6_addr_any() is true for the original source address passed into addr6_resolve(). We can drop our call to ipv6_dev_get_saddr() because ipv6_dst_lookup() already does that work. Fixes: eea40b8f624 ("infiniband: call ipv6 route lookup via the stub interface") Cc: # 3.12+ Signed-off-by: Roland Dreier Acked-by: Paolo Abeni Signed-off-by: Doug Ledford --- drivers/infiniband/core/addr.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 02971e239a18..ece6926fa2e6 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, return ret; rt = (struct rt6_info *)dst; - if (ipv6_addr_any(&fl6.saddr)) { - ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, - &fl6.daddr, 0, &fl6.saddr); - if (ret) - goto put; - + if (ipv6_addr_any(&src_in->sin6_addr)) { src_in->sin6_family = AF_INET6; src_in->sin6_addr = fl6.saddr; } @@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, *pdst = dst; return 0; -put: - dst_release(dst); - return ret; } #else static int addr6_resolve(struct sockaddr_in6 *src_in, From 3a807b751c66e76bff39da4a9bd9828d7866d025 Mon Sep 17 00:00:00 2001 From: John Allen Date: Tue, 6 Jun 2017 16:55:52 -0500 Subject: [PATCH 063/436] ibmvnic: Return failure on attempted mtu change Changing the mtu is currently not supported in the ibmvnic driver. Implement .ndo_change_mtu in the driver so that attempting to use ifconfig to change the mtu will fail and present the user with an error message. Signed-off-by: John Allen Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a93757c255f7..c0fbeb387db4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev) } #endif +static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + return -EOPNOTSUPP; +} + static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_open = ibmvnic_open, .ndo_stop = ibmvnic_close, @@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmvnic_netpoll_controller, #endif + .ndo_change_mtu = ibmvnic_change_mtu, }; /* ethtool functions */ From e173db36e3090d90651d97ab4bc45ace7f8cc0c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Antoine=20T=C3=A9nart?= Date: Wed, 7 Jun 2017 08:17:50 +0200 Subject: [PATCH 064/436] net: mvpp2: do not bypass the mvpp22_port_mii_set function The mvpp22_port_mii_set() function was added by 2697582144dd, but the function directly returns without doing anything. This return was used when debugging and wasn't removed before sending the patch. Fix this. Fixes: 2697582144dd ("net: mvpp2: handle misc PPv2.1/PPv2.2 differences") Signed-off-by: Antoine Tenart Acked-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 9b875d776b29..70bca2a6fb02 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4186,8 +4186,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) { u32 val; - return; - /* Only GOP port 0 has an XLG MAC */ if (port->gop_id == 0) { val = readl(port->base + MVPP22_XLG_CTRL3_REG); From 7005cade1bdbb423413f8aafcbf17a1ec614a585 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 7 Jun 2017 13:45:37 +0200 Subject: [PATCH 065/436] bpf, arm64: use separate register for state in stxr Will reported that in BPF_XADD we must use a different register in stxr instruction for the status flag due to otherwise CONSTRAINED UNPREDICTABLE behavior per architecture. Reference manual says [1]: If s == t, then one of the following behaviors must occur: * The instruction is UNDEFINED. * The instruction executes as a NOP. * The instruction performs the store to the specified address, but the value stored is UNKNOWN. Thus, use a different temporary register for the status flag to fix it. Disassembly extract from test 226/STX_XADD_DW from test_bpf.ko: [...] 0000003c: c85f7d4b ldxr x11, [x10] 00000040: 8b07016b add x11, x11, x7 00000044: c80c7d4b stxr w12, x11, [x10] 00000048: 35ffffac cbnz w12, 0x0000003c [...] [1] https://static.docs.arm.com/ddi0487/b/DDI0487B_a_armv8_arm.pdf, p.6132 Fixes: 85f68fe89832 ("bpf, arm64: implement jiting of BPF_XADD") Reported-by: Will Deacon Signed-off-by: Daniel Borkmann Acked-by: Will Deacon Signed-off-by: David S. Miller --- arch/arm64/net/bpf_jit_comp.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 71f930501ade..c870d6f01ac2 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly; #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) #define TCALL_CNT (MAX_BPF_JIT_REG + 2) +#define TMP_REG_3 (MAX_BPF_JIT_REG + 3) /* Map BPF registers to A64 registers */ static const int bpf2a64[] = { @@ -57,6 +58,7 @@ static const int bpf2a64[] = { /* temporary registers for internal BPF JIT */ [TMP_REG_1] = A64_R(10), [TMP_REG_2] = A64_R(11), + [TMP_REG_3] = A64_R(12), /* tail_call_cnt */ [TCALL_CNT] = A64_R(26), /* temporary register for blinding constants */ @@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const u8 src = bpf2a64[insn->src_reg]; const u8 tmp = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; + const u8 tmp3 = bpf2a64[TMP_REG_3]; const s16 off = insn->off; const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; @@ -689,10 +692,10 @@ emit_cond_jmp: emit(A64_PRFM(tmp, PST, L1, STRM), ctx); emit(A64_LDXR(isdw, tmp2, tmp), ctx); emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); - emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx); + emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx); jmp_offset = -3; check_imm19(jmp_offset); - emit(A64_CBNZ(0, tmp2, jmp_offset), ctx); + emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); break; /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ From cf124db566e6b036b8bcbe8decbed740bdfac8c6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 8 May 2017 12:52:56 -0400 Subject: [PATCH 066/436] net: Fix inconsistent teardown and release of private netdev state. 
Network devices can allocate reasources and private memory using netdev_ops->ndo_init(). However, the release of these resources can occur in one of two different places. Either netdev_ops->ndo_uninit() or netdev->destructor(). The decision of which operation frees the resources depends upon whether it is necessary for all netdev refs to be released before it is safe to perform the freeing. netdev_ops->ndo_uninit() presumably can occur right after the NETDEV_UNREGISTER notifier completes and the unicast and multicast address lists are flushed. netdev->destructor(), on the other hand, does not run until the netdev references all go away. Further complicating the situation is that netdev->destructor() almost universally does also a free_netdev(). This creates a problem for the logic in register_netdevice(). Because all callers of register_netdevice() manage the freeing of the netdev, and invoke free_netdev(dev) if register_netdevice() fails. If netdev_ops->ndo_init() succeeds, but something else fails inside of register_netdevice(), it does call ndo_ops->ndo_uninit(). But it is not able to invoke netdev->destructor(). This is because netdev->destructor() will do a free_netdev() and then the caller of register_netdevice() will do the same. However, this means that the resources that would normally be released by netdev->destructor() will not be. Over the years drivers have added local hacks to deal with this, by invoking their destructor parts by hand when register_netdevice() fails. Many drivers do not try to deal with this, and instead we have leaks. Let's close this hole by formalizing the distinction between what private things need to be freed up by netdev->destructor() and whether the driver needs unregister_netdevice() to perform the free_netdev(). netdev->priv_destructor() performs all actions to free up the private resources that used to be freed by netdev->destructor(), except for free_netdev(). netdev->needs_free_netdev is a boolean that indicates whether free_netdev() should be done at the end of unregister_netdevice(). Now, register_netdevice() can sanely release all resources after ndo_ops->ndo_init() succeeds, by invoking both ndo_ops->ndo_uninit() and netdev->priv_destructor(). And at the end of unregister_netdevice(), we invoke netdev->priv_destructor() and optionally call free_netdev(). Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 6 +++--- drivers/net/caif/caif_hsi.c | 2 +- drivers/net/caif/caif_serial.c | 2 +- drivers/net/caif/caif_spi.c | 2 +- drivers/net/caif/caif_virtio.c | 2 +- drivers/net/can/slcan.c | 7 +++---- drivers/net/can/vcan.c | 2 +- drivers/net/can/vxcan.c | 2 +- drivers/net/dummy.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/geneve.c | 2 +- drivers/net/gtp.c | 2 +- drivers/net/hamradio/6pack.c | 2 +- drivers/net/hamradio/bpqether.c | 2 +- drivers/net/ifb.c | 4 ++-- drivers/net/ipvlan/ipvlan_main.c | 2 +- drivers/net/loopback.c | 4 ++-- drivers/net/macsec.c | 4 ++-- drivers/net/macvlan.c | 2 +- drivers/net/nlmon.c | 2 +- drivers/net/slip/slip.c | 7 +++---- drivers/net/team/team.c | 4 ++-- drivers/net/tun.c | 4 ++-- drivers/net/usb/cdc-phonet.c | 2 +- drivers/net/usb/qmi_wwan.c | 2 +- drivers/net/veth.c | 4 ++-- drivers/net/vrf.c | 2 +- drivers/net/vsockmon.c | 2 +- drivers/net/vxlan.c | 2 +- drivers/net/wan/dlci.c | 2 +- drivers/net/wan/hdlc_fr.c | 2 +- drivers/net/wan/lapbether.c | 2 +- drivers/net/wireless/ath/ath6kl/main.c | 2 +- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 1 - drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 3 ++- drivers/net/wireless/intersil/hostap/hostap_main.c | 2 +- drivers/net/wireless/mac80211_hwsim.c | 2 +- drivers/net/wireless/marvell/mwifiex/main.c | 2 +- drivers/staging/rtl8188eu/os_dep/mon.c | 2 +- drivers/usb/gadget/function/f_phonet.c | 2 +- include/linux/netdevice.h | 7 ++++--- net/8021q/vlan_dev.c | 4 ++-- net/batman-adv/soft-interface.c | 5 ++--- net/bluetooth/6lowpan.c | 2 +- net/bridge/br_device.c | 2 +- net/caif/chnl_net.c | 4 ++-- net/core/dev.c | 8 ++++++-- net/hsr/hsr_device.c | 4 ++-- net/ieee802154/6lowpan/core.c | 2 +- net/ipv4/ip_tunnel.c | 4 ++-- net/ipv4/ipmr.c | 2 +- net/ipv6/ip6_gre.c | 9 +++++---- net/ipv6/ip6_tunnel.c | 8 ++++---- net/ipv6/ip6_vti.c | 8 ++++---- net/ipv6/ip6mr.c | 2 +- net/ipv6/sit.c | 6 +++--- net/irda/irlan/irlan_eth.c | 2 +- net/l2tp/l2tp_eth.c | 2 +- net/mac80211/iface.c | 6 +++--- net/mac802154/iface.c | 7 +++---- net/openvswitch/vport-internal_dev.c | 4 ++-- net/phonet/pep-gprs.c | 2 +- 62 files changed, 105 insertions(+), 103 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2359478b977f..8ab6bdbe1682 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); if (bond->wq) destroy_workqueue(bond->wq); - free_netdev(bond_dev); } void bond_setup(struct net_device *bond_dev) @@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev) bond_dev->netdev_ops = &bond_netdev_ops; bond_dev->ethtool_ops = &bond_ethtool_ops; - bond_dev->destructor = bond_destructor; + bond_dev->needs_free_netdev = true; + bond_dev->priv_destructor = bond_destructor; SET_NETDEV_DEVTYPE(bond_dev, &bond_type); @@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name) rtnl_unlock(); if (res < 0) - bond_destructor(bond_dev); + free_netdev(bond_dev); return res; } diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index ddabce759456..71a7c3b44fdd 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; dev->priv_flags |= IFF_NO_QUEUE; - dev->destructor = 
free_netdev; + dev->needs_free_netdev = true; dev->netdev_ops = &cfhsi_netdevops; for (i = 0; i < CFHSI_PRIO_LAST; ++i) skb_queue_head_init(&cfhsi->qhead[i]); diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index c2dea4916e5d..76e1d3545105 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CAIF_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; skb_queue_head_init(&serdev->head); serdev->common.link_select = CAIF_LINK_LOW_LATENCY; serdev->common.use_frag = true; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 3a529fbe539f..fc21afe852b9 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev) dev->flags = IFF_NOARP | IFF_POINTOPOINT; dev->priv_flags |= IFF_NO_QUEUE; dev->mtu = SPI_MAX_PAYLOAD_SIZE; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; skb_queue_head_init(&cfspi->qhead); skb_queue_head_init(&cfspi->chead); cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 6122768c8644..1794ea0420b7 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev) netdev->tx_queue_len = 100; netdev->flags = IFF_POINTOPOINT | IFF_NOARP; netdev->mtu = CFV_DEF_MTU_SIZE; - netdev->destructor = free_netdev; + netdev->needs_free_netdev = true; } /* Create debugfs counters for the device */ diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index eb7173713bbc..6a6e896e52fa 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev) static void slc_free_netdev(struct net_device *dev) { int i = dev->base_addr; - free_netdev(dev); + slcan_devs[i] = NULL; } @@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = { static void slc_setup(struct net_device *dev) { dev->netdev_ops = &slc_netdev_ops; - dev->destructor = slc_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = slc_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; @@ -761,8 +762,6 @@ static void __exit slcan_exit(void) if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); - /* Intentionally leak the control block. 
*/ - dev->destructor = NULL; } unregister_netdev(dev); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index facca33d53e9..0eda1b308583 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev) dev->flags |= IFF_ECHO; dev->netdev_ops = &vcan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static struct rtnl_link_ops vcan_link_ops __read_mostly = { diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 7fbb24795681..30cf2368becf 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -156,7 +156,7 @@ static void vxcan_setup(struct net_device *dev) dev->tx_queue_len = 0; dev->flags = (IFF_NOARP|IFF_ECHO); dev->netdev_ops = &vxcan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } /* forward declaration for rtnl_create_link() */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 149244aac20a..9905b52fe293 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev) struct dummy_priv *priv = netdev_priv(dev); kfree(priv->vfinfo); - free_netdev(dev); } static void dummy_setup(struct net_device *dev) @@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; dev->ethtool_ops = &dummy_ethtool_ops; - dev->destructor = dummy_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = dummy_free_netdev; /* Fill in device structure with ethernet-generic values. */ dev->flags |= IFF_NOARP; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 77ed2f628f9c..ea1bfcf1870a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4525,7 +4525,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static int config_mgmt_dev(struct pci_dev *pdev) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6ebb0f559a42..199459bd6961 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev) dev->netdev_ops = &geneve_netdev_ops; dev->ethtool_ops = &geneve_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &geneve_type); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 7b652bb7ebe4..ca110cd2a4e4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = { static void gtp_link_setup(struct net_device *dev) { dev->netdev_ops = >p_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->hard_header_len = 0; dev->addr_len = 0; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 922bf440e9f1..021a8ec411ab 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ dev->netdev_ops = &sp_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index f62e7f325cf9..78a6414c5fd9 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = { static void bpq_setup(struct net_device *dev) { dev->netdev_ops = &bpq_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 312fce7302d3..144ea5ae8ab4 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev) __skb_queue_purge(&txp->tq); } kfree(dp->tx_private); - free_netdev(dev); } static void ifb_setup(struct net_device *dev) @@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); eth_hw_addr_random(dev); - dev->destructor = ifb_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ifb_dev_free; } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 618ed88fad0f..7c7680c8f0e3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev) dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; dev->netdev_ops = &ipvlan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &ipvlan_header_ops; dev->ethtool_ops = &ipvlan_ethtool_ops; } diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 224f65cb576b..30612497643c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev) { dev_net(dev)->loopback_dev = NULL; free_percpu(dev->lstats); - free_netdev(dev); } static const struct net_device_ops loopback_ops = { @@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev) dev->ethtool_ops = &loopback_ethtool_ops; dev->header_ops = ð_header_ops; dev->netdev_ops = &loopback_ops; - dev->destructor = loopback_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = loopback_dev_free; } /* Setup and register the loopback device. 
*/ diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index cdc347be68f2..79411675f0e6 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev) free_percpu(macsec->secy.tx_sc.stats); dev_put(real_dev); - free_netdev(dev); } static void macsec_setup(struct net_device *dev) @@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev) dev->max_mtu = ETH_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &macsec_netdev_ops; - dev->destructor = macsec_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = macsec_free_netdev; SET_NETDEV_DEVTYPE(dev, &macsec_type); eth_zero_addr(dev->broadcast); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 346ad2ff3998..67bf7ebae5c6 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1092,7 +1092,7 @@ void macvlan_common_setup(struct net_device *dev) netif_keep_dst(dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &macvlan_hard_header_ops; dev->ethtool_ops = &macvlan_ethtool_ops; } diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index b91603835d26..c4b3362da4a2 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev) dev->netdev_ops = &nlmon_ops; dev->ethtool_ops = &nlmon_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_LLTX; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 1da31dc47f86..74b907206aa7 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev) static void sl_free_netdev(struct net_device *dev) { int i = dev->base_addr; - free_netdev(dev); + slip_devs[i] = NULL; } @@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = { static void sl_setup(struct net_device *dev) { dev->netdev_ops = &sl_netdev_ops; - dev->destructor = sl_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = sl_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; @@ -1369,8 +1370,6 @@ static void __exit slip_exit(void) if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); - /* Intentionally leak the control block. 
*/ - dev->destructor = NULL; } unregister_netdev(dev); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6c5d5ef46f75..fba8c136aa7c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev) struct team *team = netdev_priv(dev); free_percpu(team->pcpu_stats); - free_netdev(dev); } static int team_open(struct net_device *dev) @@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev) dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; - dev->destructor = team_destructor; + dev->needs_free_netdev = true; + dev->priv_destructor = team_destructor; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_TEAM; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bbd707b9ef7a..9ee7d4275640 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev) free_percpu(tun->pcpu_stats); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); - free_netdev(dev); } static void tun_setup(struct net_device *dev) @@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev) tun->group = INVALID_GID; dev->ethtool_ops = &tun_ethtool_ops; - dev->destructor = tun_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = TUN_READQ_SIZE; } diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index eb52de8205f0..c7a350bbaaa7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev) dev->addr_len = 1; dev->tx_queue_len = 3; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } /* diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8f923a147fa9..949671ce4039 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev) dev->addr_len = 0; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->netdev_ops = &qmimux_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 38f0f03a29c8..0156fe8cac17 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev) static void veth_dev_free(struct net_device *dev) { free_percpu(dev->vstats); - free_netdev(dev); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev) NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); - dev->destructor = veth_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = veth_dev_free; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index db882493875c..d38f11d833fe 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1348,7 +1348,7 @@ static void vrf_setup(struct net_device *dev) dev->netdev_ops = &vrf_netdev_ops; dev->l3mdev_ops = &vrf_l3mdev_ops; dev->ethtool_ops = &vrf_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; /* Fill in device structure with ethernet-generic values. 
*/ eth_hw_addr_random(dev); diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index 7f0136f2dd9d..c28bdce14fd5 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev) dev->netdev_ops = &vsockmon_ops; dev->ethtool_ops = &vsockmon_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_LLTX; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a6b5052c1d36..5fa798a5c9a6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2611,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev) eth_hw_addr_random(dev); ether_setup(dev); - dev->destructor = free_netdev; + dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &vxlan_type); dev->features |= NETIF_F_LLTX; diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 65ee2a6f248c..a0d76f70c428 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev) dev->flags = 0; dev->header_ops = &dlci_header_ops; dev->netdev_ops = &dlci_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dlp->receive = dlci_receive; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index eb915281197e..78596e42a3f3 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) return -EIO; } - dev->destructor = free_netdev; + dev->needs_free_netdev = true; *get_dev_p(pvc, type) = dev; if (!used) { state(hdlc)->dce_changed = 1; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 9df9ed62beff..63f749078a1f 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = { static void lapbeth_setup(struct net_device *dev) { dev->netdev_ops = &lapbeth_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->type = ARPHRD_X25; dev->hard_header_len = 3; dev->mtu = 1000; diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 91ee542de3d7..b90c77ef792e 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev) struct ath6kl *ar = ath6kl_priv(dev); dev->netdev_ops = &ath6kl_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; dev->needed_headroom = ETH_HLEN; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index cd1d6730eab7..617199c0e5a0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev) if (vif) brcmf_free_vif(vif); - free_netdev(ndev); } static bool brcmf_is_linkup(const struct brcmf_event_msg *e) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index a3d82368f1a9..511d190c6cca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, 
s32 bsscfgidx, s32 ifidx, if (!ndev) return ERR_PTR(-ENOMEM); - ndev->destructor = brcmf_cfg80211_free_netdev; + ndev->needs_free_netdev = true; + ndev->priv_destructor = brcmf_cfg80211_free_netdev; ifp = netdev_priv(ndev); ifp->ndev = ndev; /* store mapping ifidx to bsscfgidx */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 544fc09dcb62..1372b20f931e 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local, dev->mem_end = mdev->mem_end; hostap_setup_dev(dev, local, type); - dev->destructor = free_netdev; + dev->needs_free_netdev = true; sprintf(dev->name, "%s%s", prefix, name); if (!rtnl_locked) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 002b25cff5b6..c854a557998b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = { static void hwsim_mon_setup(struct net_device *dev) { dev->netdev_ops = &hwsim_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; ether_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211_RADIOTAP; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index dd87b9ff64c3..39b6b5e3f6e0 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; /* Initialize private structure */ priv->current_key_index = 0; priv->media_connected = false; diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index cfe37eb026d6..859d0d6051cd 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = { static void mon_setup(struct net_device *dev) { dev->netdev_ops = &mon_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; ether_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211; diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index b4058f0000e4..6a1ce6a55158 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev) dev->tx_queue_len = 1; dev->netdev_ops = &pn_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &phonet_header_ops; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3f39d27decf4..ab7ca3fdc495 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1596,8 +1596,8 @@ enum netdev_priv_flags { * @rtnl_link_state: This enum represents the phases of creating * a new link * - * @destructor: Called from unregister, - * can be used to call free_netdev + * @needs_free_netdev: Should unregister perform free_netdev? 
+ * @priv_destructor: Called from unregister * @npinfo: XXX: need comments on this one * @nd_net: Network namespace this network device is inside * @@ -1858,7 +1858,8 @@ struct net_device { RTNL_LINK_INITIALIZING, } rtnl_link_state:16; - void (*destructor)(struct net_device *dev); + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *dev); #ifdef CONFIG_NETPOLL struct netpoll_info __rcu *npinfo; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 953b6728bd00..abc5f400fc71 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev) free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; - free_netdev(dev); } void vlan_setup(struct net_device *dev) @@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev) netif_keep_dst(dev); dev->netdev_ops = &vlan_netdev_ops; - dev->destructor = vlan_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops; dev->min_mtu = 0; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b25789abf7b9..10f7edfb176e 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev) * netdev and its private data (bat_priv) */ rcu_barrier(); - - free_netdev(dev); } /** @@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &batadv_netdev_ops; - dev->destructor = batadv_softif_free; + dev->needs_free_netdev = true; + dev->priv_destructor = batadv_softif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; dev->priv_flags |= IFF_NO_QUEUE; diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 608959989f8e..ab3b654b05cc 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev) dev->netdev_ops = &netdev_ops; dev->header_ops = &header_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static struct device_type bt_type = { diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 430b53e7d941..f0f3447e8aa4 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &br_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 1816fc9f1ee7..fe3c53efb949 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev) { struct chnl_net *priv = netdev_priv(dev); caif_free_client(&priv->chnl); - free_netdev(dev); } static void ipcaif_net_setup(struct net_device *dev) { struct chnl_net *priv; dev->netdev_ops = &netdev_ops; - dev->destructor = chnl_net_destructor; + dev->needs_free_netdev = true; + dev->priv_destructor = chnl_net_destructor; dev->flags |= IFF_NOARP; dev->flags |= IFF_POINTOPOINT; dev->mtu = GPRS_PDP_MTU; diff --git a/net/core/dev.c b/net/core/dev.c index 84e1e86a4bce..4c15466305c3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7502,6 +7502,8 @@ out: err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); + if (dev->priv_destructor) + dev->priv_destructor(dev); goto out; } 
EXPORT_SYMBOL(register_netdevice); @@ -7709,8 +7711,10 @@ void netdev_run_todo(void) WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr); - if (dev->destructor) - dev->destructor(dev); + if (dev->priv_destructor) + dev->priv_destructor(dev); + if (dev->needs_free_netdev) + free_netdev(dev); /* Report a network device has been unregistered */ rtnl_lock(); diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index c73160fb11e7..0a0a392dc2bd 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) del_timer_sync(&hsr->announce_timer); synchronize_rcu(); - free_netdev(hsr_dev); } static const struct net_device_ops hsr_device_ops = { @@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &hsr_type); dev->priv_flags |= IFF_NO_QUEUE; - dev->destructor = hsr_dev_destroy; + dev->needs_free_netdev = true; + dev->priv_destructor = hsr_dev_destroy; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index d7efbf0dad20..0a866f332290 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev) ldev->netdev_ops = &lowpan_netdev_ops; ldev->header_ops = &lowpan_header_ops; - ldev->destructor = free_netdev; + ldev->needs_free_netdev = true; ldev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index b878ecbc0608..b436d0775631 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -967,7 +967,6 @@ static void ip_tunnel_dev_free(struct net_device *dev) gro_cells_destroy(&tunnel->gro_cells); dst_cache_destroy(&tunnel->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) @@ -1155,7 +1154,8 @@ int ip_tunnel_init(struct net_device *dev) struct iphdr *iph = &tunnel->parms.iph; int err; - dev->destructor = ip_tunnel_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip_tunnel_dev_free; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 551de4d023a8..b4f9622ee9f5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev) dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0c5b4caa1949..64eea3962733 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -991,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev) dst_cache_destroy(&t->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } static void ip6gre_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ip6gre_netdev_ops; - dev->destructor = ip6gre_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip6gre_dev_free; dev->type = ARPHRD_IP6GRE; @@ -1148,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net) return 0; err_reg_dev: - ip6gre_dev_free(ign->fb_tunnel_dev); + free_netdev(ign->fb_tunnel_dev); err_alloc_dev: return err; } @@ -1300,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = 
&ip6gre_tap_netdev_ops; - dev->destructor = ip6gre_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip6gre_dev_free; dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_TX_SKB_SHARING; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9b37f9747fc6..c3581973f5d7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev) gro_cells_destroy(&t->gro_cells); dst_cache_destroy(&t->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } static int ip6_tnl_create2(struct net_device *dev) @@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) return t; failed_free: - ip6_dev_free(dev); + free_netdev(dev); failed: return ERR_PTR(err); } @@ -1777,7 +1776,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { static void ip6_tnl_dev_setup(struct net_device *dev) { dev->netdev_ops = &ip6_tnl_netdev_ops; - dev->destructor = ip6_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip6_dev_free; dev->type = ARPHRD_TUNNEL6; dev->flags |= IFF_NOARP; @@ -2224,7 +2224,7 @@ static int __net_init ip6_tnl_init_net(struct net *net) return 0; err_register: - ip6_dev_free(ip6n->fb_tnl_dev); + free_netdev(ip6n->fb_tnl_dev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d67ef56454b2..837ea1eefe7f 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t) static void vti6_dev_free(struct net_device *dev) { free_percpu(dev->tstats); - free_netdev(dev); } static int vti6_tnl_create2(struct net_device *dev) @@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p return t; failed_free: - vti6_dev_free(dev); + free_netdev(dev); failed: return NULL; } @@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = { static void vti6_dev_setup(struct net_device *dev) { dev->netdev_ops = &vti6_netdev_ops; - dev->destructor = vti6_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = vti6_dev_free; dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); @@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net) return 0; err_register: - vti6_dev_free(ip6n->fb_tnl_dev); + free_netdev(ip6n->fb_tnl_dev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 374997d26488..2ecb39b943b5 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev) dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 61e5902f0687..2378503577b0 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, return nt; failed_free: - ipip6_dev_free(dev); + free_netdev(dev); failed: return NULL; } @@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev) dst_cache_destroy(&tunnel->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } #define SIT_FEATURES (NETIF_F_SG | \ @@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev) int t_hlen = tunnel->hlen + sizeof(struct iphdr); dev->netdev_ops = &ipip6_netdev_ops; - dev->destructor = 
ipip6_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; dev->hard_header_len = LL_MAX_HEADER + t_hlen; diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 74d09f91709e..3be852808a9d 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &irlan_eth_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 8b21af7321b9..f7c54ece3733 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -141,7 +141,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->features |= NETIF_F_LLTX; dev->netdev_ops = &l2tp_eth_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8fae1a72e6a7..915d7e1b4545 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_free(struct net_device *dev) { free_percpu(dev->tstats); - free_netdev(dev); } static void ieee80211_if_setup(struct net_device *dev) @@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; - dev->destructor = ieee80211_if_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ieee80211_if_free; } static void ieee80211_if_setup_no_queue(struct net_device *dev) @@ -1905,7 +1905,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ret = register_netdevice(ndev); if (ret) { - ieee80211_if_free(ndev); + free_netdev(ndev); return ret; } } diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 06019dba4b10..bd88a9b80773 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev) struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); mac802154_llsec_destroy(&sdata->sec); - - free_netdev(dev); } static void ieee802154_if_setup(struct net_device *dev) @@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, sdata->dev->dev_addr); sdata->dev->header_ops = &mac802154_header_ops; - sdata->dev->destructor = mac802154_wpan_free; + sdata->dev->needs_free_netdev = true; + sdata->dev->priv_destructor = mac802154_wpan_free; sdata->dev->netdev_ops = &mac802154_wpan_ops; sdata->dev->ml_priv = &mac802154_mlme_wpan; wpan_dev->promiscuous_mode = false; @@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, break; case NL802154_IFTYPE_MONITOR: - sdata->dev->destructor = free_netdev; + sdata->dev->needs_free_netdev = true; sdata->dev->netdev_ops = &mac802154_monitor_ops; wpan_dev->promiscuous_mode = true; break; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 89193a634da4..04a3128adcf0 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev) struct vport *vport = ovs_internal_dev_get_vport(dev); ovs_vport_free(vport); - free_netdev(dev); } static 
void @@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev) netdev->priv_flags &= ~IFF_TX_SKB_SHARING; netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | IFF_PHONY_HEADROOM | IFF_NO_QUEUE; - netdev->destructor = internal_dev_destructor; + netdev->needs_free_netdev = true; + netdev->priv_destructor = internal_dev_destructor; netdev->ethtool_ops = &internal_dev_ethtool_ops; netdev->rtnl_link_ops = &internal_dev_link_ops; diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index 21c28b51be94..2c9337946e30 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c @@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev) dev->tx_queue_len = 10; dev->netdev_ops = &gprs_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } /* From d41519a69b35b10af7fda867fb9100df24fdf403 Mon Sep 17 00:00:00 2001 From: David Miller Date: Fri, 2 Jun 2017 11:28:54 -0400 Subject: [PATCH 067/436] crypto: Work around deallocated stack frame reference gcc bug on sparc. On sparc, if we have an alloca() like situation, as is the case with SHASH_DESC_ON_STACK(), we can end up referencing deallocated stack memory. The result can be that the value is clobbered if a trap or interrupt arrives at just the right instruction. It only occurs if the function ends returning a value from that alloca() area and that value can be placed into the return value register using a single instruction. For example, in lib/libcrc32c.c:crc32c() we end up with a return sequence like: return %i7+8 lduw [%o5+16], %o0 ! MEM[(u32 *)__shash_desc.1_10 + 16B], %o5 holds the base of the on-stack area allocated for the shash descriptor. But the return released the stack frame and the register window. So if an intererupt arrives between 'return' and 'lduw', then the value read at %o5+16 can be corrupted. Add a data compiler barrier to work around this problem. This is exactly what the gcc fix will end up doing as well, and it absolutely should not change the code generated for other cpus (unless gcc on them has the same bug :-) With crucial insight from Eric Sandeen. Cc: Reported-by: Anatoly Pugachev Signed-off-by: David S. 
Miller Signed-off-by: Herbert Xu --- drivers/infiniband/sw/rxe/rxe.h | 5 ++++- fs/btrfs/hash.c | 5 ++++- fs/f2fs/f2fs.h | 5 ++++- lib/libcrc32c.c | 6 ++++-- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index ecdba2fce083..1ac5b8551a4d 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -68,6 +68,7 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, u32 crc, void *next, size_t len) { + u32 retval; int err; SHASH_DESC_ON_STACK(shash, rxe->tfm); @@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, return crc32_le(crc, next, len); } - return *(u32 *)shash_desc_ctx(shash); + retval = *(u32 *)shash_desc_ctx(shash); + barrier_data(shash_desc_ctx(shash)); + return retval; } int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c index a97fdc156a03..baacc1866861 100644 --- a/fs/btrfs/hash.c +++ b/fs/btrfs/hash.c @@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length) { SHASH_DESC_ON_STACK(shash, tfm); u32 *ctx = (u32 *)shash_desc_ctx(shash); + u32 retval; int err; shash->tfm = tfm; @@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length) err = crypto_shash_update(shash, address, length); BUG_ON(err); - return *ctx; + retval = *ctx; + barrier_data(ctx); + return retval; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 2185c7a040a1..fd2e651bad6d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, { SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver); u32 *ctx = (u32 *)shash_desc_ctx(shash); + u32 retval; int err; shash->tfm = sbi->s_chksum_driver; @@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, err = crypto_shash_update(shash, address, length); BUG_ON(err); - return *ctx; + retval = *ctx; + barrier_data(ctx); + return retval; } static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 74a54b7f2562..9f79547d1b97 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c @@ -43,7 +43,7 @@ static struct crypto_shash *tfm; u32 crc32c(u32 crc, const void *address, unsigned int length) { SHASH_DESC_ON_STACK(shash, tfm); - u32 *ctx = (u32 *)shash_desc_ctx(shash); + u32 ret, *ctx = (u32 *)shash_desc_ctx(shash); int err; shash->tfm = tfm; @@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length) err = crypto_shash_update(shash, address, length); BUG_ON(err); - return *ctx; + ret = *ctx; + barrier_data(ctx); + return ret; } EXPORT_SYMBOL(crc32c); From b94aac64a4c17c5af92f9b4ba7164c5b384d5c02 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 7 Jun 2017 12:07:51 -0300 Subject: [PATCH 068/436] [media] cec: race fix: don't return -ENONET in cec_receive() When calling CEC_RECEIVE do not check if the adapter is configured. Typically CEC_RECEIVE is called after a select() and if that indicates that there are messages in the receive queue, then you should always be able to dequeue a message. The race condition here is that a message has been received and is queued, so select() tells userspace that a message is available. But before the application calls CEC_RECEIVE the adapter is unconfigured (e.g. the HDMI cable is removed). 
Now select will always report that there is a message, but calling CEC_RECEIVE will always return -ENONET because the adapter is no longer configured and so will never actually dequeue the message. There is really no need for this check, and in fact the ENONET error code was never documented for CEC_RECEIVE. This may have been a left-over of old code that was never updated. Signed-off-by: Hans Verkuil Cc: # for v4.10 and up Signed-off-by: Mauro Carvalho Chehab --- drivers/media/cec/cec-api.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 0860fb458757..999926f731c8 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh, bool block, struct cec_msg __user *parg) { struct cec_msg msg = {}; - long err = 0; + long err; if (copy_from_user(&msg, parg, sizeof(msg))) return -EFAULT; - mutex_lock(&adap->lock); - if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR) - err = -ENONET; - mutex_unlock(&adap->lock); - if (err) - return err; err = cec_receive_msg(fh, &msg, block); if (err) From d334a5637dfb53f7d07017afc1e491903b482ef8 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 5 Jun 2017 14:52:12 -0500 Subject: [PATCH 069/436] iommu/amd: Reduce amount of MMIO when submitting commands As newer, higher speed devices are developed, perf data shows that the amount of MMIO that is performed when submitting commands to the IOMMU causes performance issues. Currently, the command submission path reads the command buffer head and tail pointers and then writes the tail pointer once the command is ready. The tail pointer is only ever updated by the driver so it can be tracked by the driver without having to read it from the hardware. The head pointer is updated by the hardware, but can be read opportunistically. Reading the head pointer only when it appears that there might not be room in the command buffer and then re-checking the available space reduces the number of times the head pointer has to be read. 
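The scheme described above boils down to a software-owned tail plus a lazily refreshed cache of the hardware head pointer. A minimal stand-alone sketch of that pattern follows; the names (cmd_ring, mmio_read_head(), mmio_write_tail()) and the sizes are assumptions of the sketch, not the amd_iommu driver's real interface, and the completion-wait handling visible in the diff below is left out:

#include <stdint.h>
#include <string.h>

#define CMD_BUFFER_SIZE 8192   /* power of two, so unsigned wrap-around keeps the modulo math valid */
#define CMD_SIZE        16     /* one command entry */

/* Hypothetical MMIO accessors standing in for readl()/writel() on the
 * command-buffer head/tail registers. */
extern uint32_t mmio_read_head(void);
extern void mmio_write_tail(uint32_t tail);

struct cmd_ring {
	uint8_t  buf[CMD_BUFFER_SIZE];
	uint32_t head;   /* cached copy of the hardware head pointer    */
	uint32_t tail;   /* owned by software, mirrored to the hardware */
};

static int ring_submit(struct cmd_ring *ring, const void *cmd)
{
	uint32_t next_tail = (ring->tail + CMD_SIZE) % CMD_BUFFER_SIZE;
	uint32_t left = (ring->head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= CMD_SIZE) {
		/* Ring looks full: only now pay for an MMIO read of the
		 * head pointer, then recheck the free space. */
		ring->head = mmio_read_head();
		left = (ring->head - next_tail) % CMD_BUFFER_SIZE;
		if (left <= CMD_SIZE)
			return -1;   /* caller retries or waits */
	}

	memcpy(ring->buf + ring->tail, cmd, CMD_SIZE);
	ring->tail = next_tail;
	mmio_write_tail(ring->tail);  /* single MMIO write per command */
	return 0;
}

The only head-pointer read happens on the would-be-full path, and the tail update is the single MMIO write per submitted command.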
Signed-off-by: Tom Lendacky Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 35 +++++++++++++++++++++------------ drivers/iommu/amd_iommu_init.c | 2 ++ drivers/iommu/amd_iommu_types.h | 2 ++ 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d7748955184b..d81c895ff4f4 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -874,19 +874,20 @@ static int wait_on_sem(volatile u64 *sem) } static void copy_cmd_to_buffer(struct amd_iommu *iommu, - struct iommu_cmd *cmd, - u32 tail) + struct iommu_cmd *cmd) { u8 *target; - target = iommu->cmd_buf + tail; - tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; + target = iommu->cmd_buf + iommu->cmd_buf_tail; + + iommu->cmd_buf_tail += sizeof(*cmd); + iommu->cmd_buf_tail %= CMD_BUFFER_SIZE; /* Copy command to buffer */ memcpy(target, cmd, sizeof(*cmd)); /* Tell the IOMMU about it */ - writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); } static void build_completion_wait(struct iommu_cmd *cmd, u64 address) @@ -1044,23 +1045,31 @@ static int __iommu_queue_command_sync(struct amd_iommu *iommu, struct iommu_cmd *cmd, bool sync) { - u32 left, tail, head, next_tail; + bool read_head = true; + u32 left, next_tail; + next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; again: - - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; - left = (head - next_tail) % CMD_BUFFER_SIZE; + left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; if (left <= 0x20) { struct iommu_cmd sync_cmd; int ret; + if (read_head) { + /* Update head and recheck remaining space */ + iommu->cmd_buf_head = readl(iommu->mmio_base + + MMIO_CMD_HEAD_OFFSET); + read_head = false; + goto again; + } + + read_head = true; + iommu->cmd_sem = 0; build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem); - copy_cmd_to_buffer(iommu, &sync_cmd, tail); + copy_cmd_to_buffer(iommu, &sync_cmd); if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0) return ret; @@ -1068,7 +1077,7 @@ again: goto again; } - copy_cmd_to_buffer(iommu, cmd, tail); + copy_cmd_to_buffer(iommu, cmd); /* We need to sync now to make sure all commands are processed */ iommu->need_sync = sync; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5a11328f4d98..3fa7e3b35507 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -588,6 +588,8 @@ void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + iommu->cmd_buf_head = 0; + iommu->cmd_buf_tail = 0; iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 4de8f4160bb8..6960d7db2fab 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -516,6 +516,8 @@ struct amd_iommu { /* command buffer virtual address */ u8 *cmd_buf; + u32 cmd_buf_head; + u32 cmd_buf_tail; /* event buffer virtual address */ u8 *evt_buf; From 23e967e17c58779b38f69f8d41d727f59440d36a Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 5 Jun 2017 14:52:26 -0500 Subject: [PATCH 070/436] iommu/amd: Reduce delay waiting for command buffer space Currently if there is no room to add a command to the command buffer, the driver performs a "completion 
wait" which only returns when all commands on the queue have been processed. There is no need to wait for the entire command queue to be executed before adding the next command. Update the driver to perform the same udelay() loop that the "completion wait" performs, but instead re-read the head pointer to determine if sufficient space is available. The very first time it is found that there is no space available, the udelay() will be skipped to immediately perform the opportunistic read of the head pointer. If it is still found that there is not sufficient space, then the udelay() will be performed. Signed-off-by: Leo Duran Signed-off-by: Tom Lendacky Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d81c895ff4f4..1efbef7f3b61 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1045,7 +1045,7 @@ static int __iommu_queue_command_sync(struct amd_iommu *iommu, struct iommu_cmd *cmd, bool sync) { - bool read_head = true; + unsigned int count = 0; u32 left, next_tail; next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; @@ -1053,33 +1053,26 @@ again: left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; if (left <= 0x20) { - struct iommu_cmd sync_cmd; - int ret; + /* Skip udelay() the first time around */ + if (count++) { + if (count == LOOP_TIMEOUT) { + pr_err("AMD-Vi: Command buffer timeout\n"); + return -EIO; + } - if (read_head) { - /* Update head and recheck remaining space */ - iommu->cmd_buf_head = readl(iommu->mmio_base + - MMIO_CMD_HEAD_OFFSET); - read_head = false; - goto again; + udelay(1); } - read_head = true; - - iommu->cmd_sem = 0; - - build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem); - copy_cmd_to_buffer(iommu, &sync_cmd); - - if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0) - return ret; + /* Update head and recheck remaining space */ + iommu->cmd_buf_head = readl(iommu->mmio_base + + MMIO_CMD_HEAD_OFFSET); goto again; } copy_cmd_to_buffer(iommu, cmd); - /* We need to sync now to make sure all commands are processed */ + /* Do we need to make sure all commands are processed? */ iommu->need_sync = sync; return 0; From 460c26d05bf7357a4d5b41b3d7a97727f5bdffe1 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 2 Jun 2017 14:28:01 +0200 Subject: [PATCH 071/436] iommu/amd: Rip out old queue flushing code The queue flushing is pretty inefficient when it flushes the queues for all cpus at once. Further it flushes all domains from all IOMMUs for all CPUs, which is overkill as well. Rip it out to make room for something more efficient. 
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 143 ++------------------------------------ 1 file changed, 6 insertions(+), 137 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1efbef7f3b61..ec9da25c06a1 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -89,25 +89,6 @@ LIST_HEAD(ioapic_map); LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); -#define FLUSH_QUEUE_SIZE 256 - -struct flush_queue_entry { - unsigned long iova_pfn; - unsigned long pages; - struct dma_ops_domain *dma_dom; -}; - -struct flush_queue { - spinlock_t lock; - unsigned next; - struct flush_queue_entry *entries; -}; - -static DEFINE_PER_CPU(struct flush_queue, flush_queue); - -static atomic_t queue_timer_on; -static struct timer_list queue_timer; - /* * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. @@ -2253,92 +2234,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) * *****************************************************************************/ -static void __queue_flush(struct flush_queue *queue) -{ - struct protection_domain *domain; - unsigned long flags; - int idx; - - /* First flush TLB of all known domains */ - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_for_each_entry(domain, &amd_iommu_pd_list, list) - domain_flush_tlb(domain); - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); - - /* Wait until flushes have completed */ - domain_flush_complete(NULL); - - for (idx = 0; idx < queue->next; ++idx) { - struct flush_queue_entry *entry; - - entry = queue->entries + idx; - - free_iova_fast(&entry->dma_dom->iovad, - entry->iova_pfn, - entry->pages); - - /* Not really necessary, just to make sure we catch any bugs */ - entry->dma_dom = NULL; - } - - queue->next = 0; -} - -static void queue_flush_all(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct flush_queue *queue; - unsigned long flags; - - queue = per_cpu_ptr(&flush_queue, cpu); - spin_lock_irqsave(&queue->lock, flags); - if (queue->next > 0) - __queue_flush(queue); - spin_unlock_irqrestore(&queue->lock, flags); - } -} - -static void queue_flush_timeout(unsigned long unsused) -{ - atomic_set(&queue_timer_on, 0); - queue_flush_all(); -} - -static void queue_add(struct dma_ops_domain *dma_dom, - unsigned long address, unsigned long pages) -{ - struct flush_queue_entry *entry; - struct flush_queue *queue; - unsigned long flags; - int idx; - - pages = __roundup_pow_of_two(pages); - address >>= PAGE_SHIFT; - - queue = get_cpu_ptr(&flush_queue); - spin_lock_irqsave(&queue->lock, flags); - - if (queue->next == FLUSH_QUEUE_SIZE) - __queue_flush(queue); - - idx = queue->next++; - entry = queue->entries + idx; - - entry->iova_pfn = address; - entry->pages = pages; - entry->dma_dom = dma_dom; - - spin_unlock_irqrestore(&queue->lock, flags); - - if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0) - mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10)); - - put_cpu_ptr(&flush_queue); -} - - /* * In the dma_ops path we only have the struct device. 
This function * finds the corresponding IOMMU, the protection domain and the @@ -2490,7 +2385,10 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, domain_flush_tlb(&dma_dom->domain); domain_flush_complete(&dma_dom->domain); } else { - queue_add(dma_dom, dma_addr, pages); + /* Keep the if() around, we need it later again */ + dma_ops_free_iova(dma_dom, dma_addr, pages); + domain_flush_tlb(&dma_dom->domain); + domain_flush_complete(&dma_dom->domain); } } @@ -2825,7 +2723,7 @@ static int init_reserved_iova_ranges(void) int __init amd_iommu_init_api(void) { - int ret, cpu, err = 0; + int ret, err = 0; ret = iova_cache_get(); if (ret) @@ -2835,18 +2733,6 @@ int __init amd_iommu_init_api(void) if (ret) return ret; - for_each_possible_cpu(cpu) { - struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu); - - queue->entries = kzalloc(FLUSH_QUEUE_SIZE * - sizeof(*queue->entries), - GFP_KERNEL); - if (!queue->entries) - goto out_put_iova; - - spin_lock_init(&queue->lock); - } - err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops); if (err) return err; @@ -2858,23 +2744,12 @@ int __init amd_iommu_init_api(void) err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops); if (err) return err; + return 0; - -out_put_iova: - for_each_possible_cpu(cpu) { - struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu); - - kfree(queue->entries); - } - - return -ENOMEM; } int __init amd_iommu_init_dma_ops(void) { - setup_timer(&queue_timer, queue_flush_timeout, 0); - atomic_set(&queue_timer_on, 0); - swiotlb = iommu_pass_through ? 1 : 0; iommu_detected = 1; @@ -3030,12 +2905,6 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) switch (dom->type) { case IOMMU_DOMAIN_DMA: - /* - * First make sure the domain is no longer referenced from the - * flush queue - */ - queue_flush_all(); - /* Now release the domain */ dma_dom = to_dma_ops_domain(domain); dma_ops_domain_free(dma_dom); From d4241a276119bf404e6c0e23f06f84b84c4ecfc0 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 2 Jun 2017 14:55:56 +0200 Subject: [PATCH 072/436] iommu/amd: Add per-domain flush-queue data structures Make the flush-queue per dma-ops domain and add code allocate and free the flush-queues; Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index ec9da25c06a1..2418fcc28fbe 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -136,6 +136,18 @@ static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); +#define FLUSH_QUEUE_SIZE 256 + +struct flush_queue_entry { + unsigned long iova_pfn; + unsigned long pages; +}; + +struct flush_queue { + struct flush_queue_entry *entries; + unsigned head, tail; +}; + /* * Data container for a dma_ops specific protection domain */ @@ -145,6 +157,8 @@ struct dma_ops_domain { /* IOVA RB-Tree */ struct iova_domain iovad; + + struct flush_queue __percpu *flush_queue; }; static struct iova_domain reserved_iova_ranges; @@ -1742,6 +1756,56 @@ static void free_gcr3_table(struct protection_domain *domain) free_page((unsigned long)domain->gcr3_tbl); } +static void dma_ops_domain_free_flush_queue(struct dma_ops_domain *dom) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + kfree(queue->entries); + } + + free_percpu(dom->flush_queue); + 
+ dom->flush_queue = NULL; +} + +static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) +{ + int cpu; + + dom->flush_queue = alloc_percpu(struct flush_queue); + if (!dom->flush_queue) + return -ENOMEM; + + /* First make sure everything is cleared */ + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + queue->head = 0; + queue->tail = 0; + queue->entries = NULL; + } + + /* Now start doing the allocation */ + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries), + GFP_KERNEL); + if (!queue->entries) { + dma_ops_domain_free_flush_queue(dom); + return -ENOMEM; + } + } + + return 0; +} + /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table @@ -1753,6 +1817,8 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) del_domain_from_list(&dom->domain); + dma_ops_domain_free_flush_queue(dom); + put_iova_domain(&dom->iovad); free_pagetable(&dom->domain); @@ -1791,6 +1857,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) /* Initialize reserved ranges */ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); + if (dma_ops_domain_alloc_flush_queue(dma_dom)) + goto free_dma_dom; + add_domain_to_list(&dma_dom->domain); return dma_dom; From fd62190a67d6bdf9b93dea056adfcd7fd29b0f92 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 2 Jun 2017 15:37:26 +0200 Subject: [PATCH 073/436] iommu/amd: Make use of the per-domain flush queue Fill the flush-queue on unmap and only flush the IOMMU and device TLBs when a per-cpu queue gets full. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 60 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2418fcc28fbe..9fafc3026865 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1806,6 +1806,61 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) return 0; } +static inline bool queue_ring_full(struct flush_queue *queue) +{ + return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head); +} + +#define queue_ring_for_each(i, q) \ + for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE) + +static void queue_release(struct dma_ops_domain *dom, + struct flush_queue *queue) +{ + unsigned i; + + queue_ring_for_each(i, queue) + free_iova_fast(&dom->iovad, + queue->entries[i].iova_pfn, + queue->entries[i].pages); + + queue->head = queue->tail = 0; +} + +static inline unsigned queue_ring_add(struct flush_queue *queue) +{ + unsigned idx = queue->tail; + + queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE; + + return idx; +} + +static void queue_add(struct dma_ops_domain *dom, + unsigned long address, unsigned long pages) +{ + struct flush_queue *queue; + int idx; + + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + queue = get_cpu_ptr(dom->flush_queue); + + if (queue_ring_full(queue)) { + domain_flush_tlb(&dom->domain); + domain_flush_complete(&dom->domain); + queue_release(dom, queue); + } + + idx = queue_ring_add(queue); + + queue->entries[idx].iova_pfn = address; + queue->entries[idx].pages = pages; + + put_cpu_ptr(dom->flush_queue); +} + /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table @@ -2454,10 +2509,7 @@ static 
void __unmap_single(struct dma_ops_domain *dma_dom, domain_flush_tlb(&dma_dom->domain); domain_flush_complete(&dma_dom->domain); } else { - /* Keep the if() around, we need it later again */ - dma_ops_free_iova(dma_dom, dma_addr, pages); - domain_flush_tlb(&dma_dom->domain); - domain_flush_complete(&dma_dom->domain); + queue_add(dma_dom, dma_addr, pages); } } From e241f8e76c152e000d481fc8334d41d22c013fe8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 2 Jun 2017 15:44:57 +0200 Subject: [PATCH 074/436] iommu/amd: Add locking to per-domain flush-queue With locking we can safely access the flush-queues of other cpus. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 9fafc3026865..9a06acc8cc9d 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -146,6 +146,7 @@ struct flush_queue_entry { struct flush_queue { struct flush_queue_entry *entries; unsigned head, tail; + spinlock_t lock; }; /* @@ -1801,6 +1802,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) dma_ops_domain_free_flush_queue(dom); return -ENOMEM; } + + spin_lock_init(&queue->lock); } return 0; @@ -1808,6 +1811,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) static inline bool queue_ring_full(struct flush_queue *queue) { + assert_spin_locked(&queue->lock); + return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head); } @@ -1819,6 +1824,8 @@ static void queue_release(struct dma_ops_domain *dom, { unsigned i; + assert_spin_locked(&queue->lock); + queue_ring_for_each(i, queue) free_iova_fast(&dom->iovad, queue->entries[i].iova_pfn, @@ -1831,6 +1838,7 @@ static inline unsigned queue_ring_add(struct flush_queue *queue) { unsigned idx = queue->tail; + assert_spin_locked(&queue->lock); queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE; return idx; @@ -1840,12 +1848,14 @@ static void queue_add(struct dma_ops_domain *dom, unsigned long address, unsigned long pages) { struct flush_queue *queue; + unsigned long flags; int idx; pages = __roundup_pow_of_two(pages); address >>= PAGE_SHIFT; queue = get_cpu_ptr(dom->flush_queue); + spin_lock_irqsave(&queue->lock, flags); if (queue_ring_full(queue)) { domain_flush_tlb(&dom->domain); @@ -1858,6 +1868,7 @@ static void queue_add(struct dma_ops_domain *dom, queue->entries[idx].iova_pfn = address; queue->entries[idx].pages = pages; + spin_unlock_irqrestore(&queue->lock, flags); put_cpu_ptr(dom->flush_queue); } From a6e3f6f030396c0576c729fd8ca4bfb654d35bfe Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 2 Jun 2017 16:01:53 +0200 Subject: [PATCH 075/436] iommu/amd: Add flush counters to struct dma_ops_domain The counters are increased every time the TLB for a given domain is flushed. We also store the current value of that counter into newly added entries of the flush-queue, so that we can tell whether this entry is already flushed. 
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 52 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 9a06acc8cc9d..795208bd39bd 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -141,6 +141,7 @@ static void detach_device(struct device *dev); struct flush_queue_entry { unsigned long iova_pfn; unsigned long pages; + u64 counter; /* Flush counter when this entry was added to the queue */ }; struct flush_queue { @@ -160,6 +161,27 @@ struct dma_ops_domain { struct iova_domain iovad; struct flush_queue __percpu *flush_queue; + + /* + * We need two counter here to be race-free wrt. IOTLB flushing and + * adding entries to the flush queue. + * + * The flush_start_cnt is incremented _before_ the IOTLB flush starts. + * New entries added to the flush ring-buffer get their 'counter' value + * from here. This way we can make sure that entries added to the queue + * (or other per-cpu queues of the same domain) while the TLB is about + * to be flushed are not considered to be flushed already. + */ + atomic64_t flush_start_cnt; + + /* + * The flush_finish_cnt is incremented when an IOTLB flush is complete. + * This value is always smaller than flush_start_cnt. The queue_add + * function frees all IOVAs that have a counter value smaller than + * flush_finish_cnt. This makes sure that we only free IOVAs that are + * flushed out of the IOTLB of the domain. + */ + atomic64_t flush_finish_cnt; }; static struct iova_domain reserved_iova_ranges; @@ -1777,6 +1799,9 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) { int cpu; + atomic64_set(&dom->flush_start_cnt, 0); + atomic64_set(&dom->flush_finish_cnt, 0); + dom->flush_queue = alloc_percpu(struct flush_queue); if (!dom->flush_queue) return -ENOMEM; @@ -1844,22 +1869,48 @@ static inline unsigned queue_ring_add(struct flush_queue *queue) return idx; } +static inline void queue_ring_remove_head(struct flush_queue *queue) +{ + assert_spin_locked(&queue->lock); + queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE; +} + static void queue_add(struct dma_ops_domain *dom, unsigned long address, unsigned long pages) { struct flush_queue *queue; unsigned long flags; + u64 counter; int idx; pages = __roundup_pow_of_two(pages); address >>= PAGE_SHIFT; + counter = atomic64_read(&dom->flush_finish_cnt); + queue = get_cpu_ptr(dom->flush_queue); spin_lock_irqsave(&queue->lock, flags); + queue_ring_for_each(idx, queue) { + /* + * This assumes that counter values in the ring-buffer are + * monotonously rising. 
+ */ + if (queue->entries[idx].counter >= counter) + break; + + free_iova_fast(&dom->iovad, + queue->entries[idx].iova_pfn, + queue->entries[idx].pages); + + queue_ring_remove_head(queue); + } + if (queue_ring_full(queue)) { + atomic64_inc(&dom->flush_start_cnt); domain_flush_tlb(&dom->domain); domain_flush_complete(&dom->domain); + atomic64_inc(&dom->flush_finish_cnt); queue_release(dom, queue); } @@ -1867,6 +1918,7 @@ static void queue_add(struct dma_ops_domain *dom, queue->entries[idx].iova_pfn = address; queue->entries[idx].pages = pages; + queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt); spin_unlock_irqrestore(&queue->lock, flags); put_cpu_ptr(dom->flush_queue); From fca6af6a5976dfa00182232f666b4f789c98bd0c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 2 Jun 2017 18:13:37 +0200 Subject: [PATCH 076/436] iommu/amd: Add per-domain timer to flush per-cpu queues Add a timer to each dma_ops domain so that we flush unused IOTLB entries regularily, even if the queues don't get full all the time. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 84 +++++++++++++++++++++++++++++++-------- 1 file changed, 67 insertions(+), 17 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 795208bd39bd..00c1796e07bf 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -182,6 +182,13 @@ struct dma_ops_domain { * flushed out of the IOTLB of the domain. */ atomic64_t flush_finish_cnt; + + /* + * Timer to make sure we don't keep IOVAs around unflushed + * for too long + */ + struct timer_list flush_timer; + atomic_t flush_timer_on; }; static struct iova_domain reserved_iova_ranges; @@ -1834,6 +1841,14 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) return 0; } +static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom) +{ + atomic64_inc(&dom->flush_start_cnt); + domain_flush_tlb(&dom->domain); + domain_flush_complete(&dom->domain); + atomic64_inc(&dom->flush_finish_cnt); +} + static inline bool queue_ring_full(struct flush_queue *queue) { assert_spin_locked(&queue->lock); @@ -1875,22 +1890,12 @@ static inline void queue_ring_remove_head(struct flush_queue *queue) queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE; } -static void queue_add(struct dma_ops_domain *dom, - unsigned long address, unsigned long pages) +static void queue_ring_free_flushed(struct dma_ops_domain *dom, + struct flush_queue *queue) { - struct flush_queue *queue; - unsigned long flags; - u64 counter; + u64 counter = atomic64_read(&dom->flush_finish_cnt); int idx; - pages = __roundup_pow_of_two(pages); - address >>= PAGE_SHIFT; - - counter = atomic64_read(&dom->flush_finish_cnt); - - queue = get_cpu_ptr(dom->flush_queue); - spin_lock_irqsave(&queue->lock, flags); - queue_ring_for_each(idx, queue) { /* * This assumes that counter values in the ring-buffer are @@ -1905,12 +1910,25 @@ static void queue_add(struct dma_ops_domain *dom, queue_ring_remove_head(queue); } +} + +static void queue_add(struct dma_ops_domain *dom, + unsigned long address, unsigned long pages) +{ + struct flush_queue *queue; + unsigned long flags; + int idx; + + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + queue = get_cpu_ptr(dom->flush_queue); + spin_lock_irqsave(&queue->lock, flags); + + queue_ring_free_flushed(dom, queue); if (queue_ring_full(queue)) { - atomic64_inc(&dom->flush_start_cnt); - domain_flush_tlb(&dom->domain); - domain_flush_complete(&dom->domain); - atomic64_inc(&dom->flush_finish_cnt); + 
dma_ops_domain_flush_tlb(dom); queue_release(dom, queue); } @@ -1921,9 +1939,33 @@ static void queue_add(struct dma_ops_domain *dom, queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt); spin_unlock_irqrestore(&queue->lock, flags); + + if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0) + mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10)); + put_cpu_ptr(dom->flush_queue); } +static void queue_flush_timeout(unsigned long data) +{ + struct dma_ops_domain *dom = (struct dma_ops_domain *)data; + int cpu; + + atomic_set(&dom->flush_timer_on, 0); + + dma_ops_domain_flush_tlb(dom); + + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + unsigned long flags; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + spin_lock_irqsave(&queue->lock, flags); + queue_ring_free_flushed(dom, queue); + spin_unlock_irqrestore(&queue->lock, flags); + } +} + /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table @@ -1935,6 +1977,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) del_domain_from_list(&dom->domain); + if (timer_pending(&dom->flush_timer)) + del_timer(&dom->flush_timer); + dma_ops_domain_free_flush_queue(dom); put_iova_domain(&dom->iovad); @@ -1978,6 +2023,11 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) if (dma_ops_domain_alloc_flush_queue(dma_dom)) goto free_dma_dom; + setup_timer(&dma_dom->flush_timer, queue_flush_timeout, + (unsigned long)dma_dom); + + atomic_set(&dma_dom->flush_timer_on, 0); + add_domain_to_list(&dma_dom->domain); return dma_dom; From ac3b708ad49f77b44faa104057783f161491e9e4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 7 Jun 2017 14:38:15 +0200 Subject: [PATCH 077/436] iommu/amd: Remove queue_release() function We can use queue_ring_free_flushed() instead, so remove this redundancy. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 00c1796e07bf..6498f5baa7ea 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1859,21 +1859,6 @@ static inline bool queue_ring_full(struct flush_queue *queue) #define queue_ring_for_each(i, q) \ for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE) -static void queue_release(struct dma_ops_domain *dom, - struct flush_queue *queue) -{ - unsigned i; - - assert_spin_locked(&queue->lock); - - queue_ring_for_each(i, queue) - free_iova_fast(&dom->iovad, - queue->entries[i].iova_pfn, - queue->entries[i].pages); - - queue->head = queue->tail = 0; -} - static inline unsigned queue_ring_add(struct flush_queue *queue) { unsigned idx = queue->tail; @@ -1925,12 +1910,15 @@ static void queue_add(struct dma_ops_domain *dom, queue = get_cpu_ptr(dom->flush_queue); spin_lock_irqsave(&queue->lock, flags); - queue_ring_free_flushed(dom, queue); - - if (queue_ring_full(queue)) { + /* + * When ring-queue is full, flush the entries from the IOTLB so + * that we can free all entries with queue_ring_free_flushed() + * below. 
+ */ + if (queue_ring_full(queue)) dma_ops_domain_flush_tlb(dom); - queue_release(dom, queue); - } + + queue_ring_free_flushed(dom, queue); idx = queue_ring_add(queue); From f7a31b5e7874f77464a4eae0a8ba84b9ae0b3a54 Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Wed, 19 Apr 2017 14:07:43 +0200 Subject: [PATCH 078/436] MIPS: perf: Remove incorrect odd/even counter handling for I6400 All performance counters on I6400 (odd and even) are capable of counting any of the available events, so drop current logic of using the extra bit to determine which counter to use. Signed-off-by: Marcin Nowakowski Fixes: 4e88a8621301 ("MIPS: Add cases for CPU_I6400") Fixes: fd716fca10fc ("MIPS: perf: Fix I6400 event numbers") Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15991/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/perf_event_mipsxx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 313a88b2973f..f3e301f95aef 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) break; case CPU_P5600: case CPU_P6600: - case CPU_I6400: /* 8-bit event numbers */ raw_id = config & 0x1ff; base_id = raw_id & 0xff; @@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_event.range = P; #endif break; + case CPU_I6400: + /* 8-bit event numbers */ + base_id = config & 0xff; + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + break; case CPU_1004K: if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; From 71eb989ab5a110df8bcbb9609bacde73feacbedd Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Tue, 11 Apr 2017 09:00:34 +0200 Subject: [PATCH 079/436] MIPS: mm: fixed mappings: correct initialisation fixrange_init operates at PMD-granularity and expects the addresses to be PMD-size aligned, but currently that might not be the case for PKMAP_BASE unless it is defined properly, so ensure a correct alignment is used before passing the address to fixrange_init. fixed mappings: only align the start address that is passed to fixrange_init rather than the value before adding the size, as we may end up with uninitialised upper part of the range. 
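A purely illustrative comparison, assuming hypothetical values PMD_MASK = 0xffc00000 (4 MiB PMDs), vaddr = 0xffe2c000 and FIXADDR_SIZE = 0x4000:

old: vaddr &= PMD_MASK;                      /* 0xffc00000 */
     fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
     /* initialises 0xffc00000..0xffc04000 only; the upper part of the
        fixmap range, 0xffe2c000..0xffe30000, gets no page tables */

new: fixrange_init(vaddr & PMD_MASK,         /* start aligned: 0xffc00000 */
                   vaddr + FIXADDR_SIZE,     /* end unchanged: 0xffe30000 */
                   pgd_base);
     /* the whole range up to the top of the fixmap is covered */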
Signed-off-by: Marcin Nowakowski Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15948/ Signed-off-by: Ralf Baechle --- arch/mips/mm/pgtable-32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index adc6911ba748..b19a3c506b1e 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -51,15 +51,15 @@ void __init pagetable_init(void) /* * Fixed mappings: */ - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); + fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); #ifdef CONFIG_HIGHMEM /* * Permanent kmaps: */ vaddr = PKMAP_BASE; - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pgd = swapper_pg_dir + __pgd_offset(vaddr); pud = pud_offset(pgd, vaddr); From 725a269b3dd149f36c206c218253336774ef26f5 Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Tue, 11 Apr 2017 09:00:35 +0200 Subject: [PATCH 080/436] MIPS: highmem: ensure that we don't use more than one page for PTEs All PTEs used by PKMAP should be allocated in a contiguous memory area, but we do not currently have a mechanism to enforce that, so ensure that we don't try to allocate more entries than would fit in a single page. Current fixed value of 1024 would not work with XPA enabled when sizeof(pte_t)==8 and we need two pages to store pte tables. Signed-off-by: Marcin Nowakowski Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15949/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/highmem.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h index d34536e7653f..279b6d14ffeb 100644 --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h @@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table; * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define LAST_PKMAP 512 +#else #define LAST_PKMAP 1024 +#endif + #define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) From c56e7a4c3e77f6fbd9b55c06c14eda65aae58958 Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Tue, 11 Apr 2017 09:00:36 +0200 Subject: [PATCH 081/436] MIPS: mm: adjust PKMAP location Space reserved for PKMap should span from PKMAP_BASE to FIXADDR_START. For large page sizes this is not the case as eg. for 64k pages the range currently defined is from 0xfe000000 to 0x102000000(!!) which obviously isn't right. Remove the hardcoded location and set the BASE address as an offset from FIXADDR_START. Since all PKMAP ptes have to be placed in a contiguous memory, ensure that this is the case by placing them all in a single page. This is achieved by aligning the end address to pkmap pages count pages. 
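Worked through with purely illustrative numbers for 64 KiB pages (PAGE_SHIFT = 16, LAST_PKMAP = 1024, so the PKMap window is 0x4000000 bytes) and an assumed FIXADDR_START of 0xffbe0000:

old: PKMAP_BASE = 0xfe000000
     PKMAP end  = 0xfe000000 + 0x4000000 = 0x102000000   /* past 4 GiB */

new: PKMAP_END  = 0xffbe0000 & ~(0x4000000 - 1) = 0xfc000000
     PKMAP_BASE = 0xfc000000 - 1024 * 0x10000   = 0xf8000000
     /* PKMap now occupies 0xf8000000..0xfc000000, below FIXADDR_START,
        with its end aligned to the size of the PKMap window */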
Signed-off-by: Marcin Nowakowski Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15950/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/pgtable-32.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 6f94bed571c4..74afe8c76bdd 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -19,6 +19,10 @@ #define __ARCH_USE_5LEVEL_HACK #include +#ifdef CONFIG_HIGHMEM +#include +#endif + extern int temp_tlb_entry; /* @@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define VMALLOC_START MAP_BASE -#define PKMAP_BASE (0xfe000000UL) +#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1)) +#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) From 87051ec120bb9abd9e57aecf2569c00d587b6f33 Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Tue, 23 May 2017 12:56:43 +0200 Subject: [PATCH 082/436] MIPS: ftrace: fix init functions tracing Since introduction of tracing for init functions the in_kernel_space() check is no longer correct, as it ignores the init sections. As a result, when probes are inserted (and disabled) in the init functions, a branch instruction is inserted instead of a nop, which is likely to result in random crashes during boot. Remove the MIPS-specific in_kernel_space() method and replace it with a generic core_kernel_text() that also checks for init sections during system boot stage. Fixes: 42c269c88dc1 ("ftrace: Allow for function tracing to record init functions on boot up") Signed-off-by: Marcin Nowakowski Tested-by: Matt Redfearn Cc: Steven Rostedt Cc: Ingo Molnar Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16092/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/ftrace.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 30a3b75e88eb..9d9b8fbae202 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command) #endif -/* - * Check if the address is in kernel space - * - * Clone core_kernel_text() from kernel/extable.c, but doesn't call - * init_kernel_text() for Ftrace doesn't trace functions in init sections. - */ -static inline int in_kernel_space(unsigned long ip) -{ - if (ip >= (unsigned long)_stext && - ip <= (unsigned long)_etext) - return 1; - return 0; -} - #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ @@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod, * If ip is in kernel space, no long call, otherwise, long call is * needed. */ - new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; + new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F; #ifdef CONFIG_64BIT return ftrace_modify_code(ip, new); #else @@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip; - new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; + new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; #ifdef CONFIG_64BIT return ftrace_modify_code(ip, new); #else - return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ? + return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ? 
INSN_NOP : insn_la_mcount[1]); #endif } @@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); + ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24); /* * search the text until finding the non-store instruction or "s{d,w} @@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, * entries configured through the tracing/set_graph_function interface. */ - insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; + insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); /* Only trace if the calling function expects to */ From 698b851073ddf5a894910d63ca04605e0473414e Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Thu, 8 Jun 2017 15:20:32 +0200 Subject: [PATCH 083/436] MIPS: kprobes: flush_insn_slot should flush only if probe initialised When ftrace is used with kprobes, it is possible for a kprobe to contain an invalid location (ie. only initialised to 0 and not to a specific location in the code). Trying to perform a cache flush on such location leads to a crash r4k_flush_icache_range(). Fixes: c1bf207d6ee1 ("MIPS: kprobe: Add support.") Signed-off-by: Marcin Nowakowski Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16296/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/kprobes.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index 291846d9ba83..ad1a99948f27 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h @@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t; #define flush_insn_slot(p) \ do { \ - flush_icache_range((unsigned long)p->addr, \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ (unsigned long)p->addr + \ (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ } while (0) From 16ddcc34b8bde5d9257114a16565fac73237bef9 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 17 Jan 2017 12:32:15 +0100 Subject: [PATCH 084/436] s390: update defconfig Signed-off-by: Martin Schwidefsky --- arch/s390/configs/default_defconfig | 39 +++++++++++++++++++++---- arch/s390/configs/gcov_defconfig | 28 +++++++++++++++--- arch/s390/configs/performance_defconfig | 27 ++++++++++++++--- arch/s390/configs/zfcpdump_defconfig | 6 ++-- arch/s390/defconfig | 8 ++--- 5 files changed, 87 insertions(+), 21 deletions(-) diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index a5039fa89314..282072206df7 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -30,6 +30,7 @@ CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y +# CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_SQ=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_BSD_DISKLABEL=y @@ -90,6 +94,8 @@ CONFIG_UNIX=y CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m +CONFIG_SMC=m +CONFIG_SMC_DIAG=m CONFIG_INET=y CONFIG_IP_MULTICAST=y 
CONFIG_IP_ADVANCED_ROUTER=y @@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_DNS_RESOLVER=y +CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m @@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CONNECTOR=y +CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=m +CONFIG_BLK_DEV_RAM_DAX=y CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m CONFIG_ENCLOSURE_SERVICES=m +CONFIG_GENWQE=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y @@ -442,6 +452,8 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y # CONFIG_NET_VENDOR_NATSEMI is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -452,7 +464,6 @@ CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m CONFIG_VIRTIO_BALLOON=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y CONFIG_XFS_DEBUG=y CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_BTRFS_DEBUG=y CONFIG_NILFS2_FS=m +CONFIG_FS_DAX=y +CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=m CONFIG_AUTOFS4_FS=m @@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y +CONFIG_DEBUG_RODATA_TEST=y CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS_SELFTEST=y CONFIG_DEBUG_OBJECTS_FREE=y @@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_ON_OOPS=y CONFIG_DEBUG_TIMEKEEPING=y -CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y CONFIG_PROVE_LOCKING=y @@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=300 CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_PM_NOTIFIER_ERROR_INJECT=m +CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y CONFIG_FAILSLAB=y CONFIG_FAIL_PAGE_ALLOC=y @@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y +CONFIG_TEST_SORT=y CONFIG_KPROBES_SANITY_TEST=y CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y -CONFIG_TEST_STRING_HELPERS=y -CONFIG_TEST_KSTRTOX=y CONFIG_DMA_API_DEBUG=y CONFIG_TEST_BPF=m CONFIG_BUG_ON_DATA_CORRUPTION=y @@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 @@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m @@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m 
+CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m @@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m @@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m +CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y CONFIG_ASYMMETRIC_KEY_TYPE=y @@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m +CONFIG_RANDOM32_SELFTEST=y CONFIG_CORDIC=m CONFIG_CMM=m CONFIG_APPLDATA_BASE=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 83970b5afb2b..3c6b78189fbc 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -31,6 +31,7 @@ CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y +# CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_SQ=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_BSD_DISKLABEL=y @@ -88,6 +92,8 @@ CONFIG_UNIX=y CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m +CONFIG_SMC=m +CONFIG_SMC_DIAG=m CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_DNS_RESOLVER=y +CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m @@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CONNECTOR=y +CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=m +CONFIG_BLK_DEV_RAM_DAX=y CONFIG_VIRTIO_BLK=y CONFIG_ENCLOSURE_SERVICES=m +CONFIG_GENWQE=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y @@ -439,6 +448,8 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y # CONFIG_NET_VENDOR_NATSEMI is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -449,7 +460,6 @@ CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m CONFIG_VIRTIO_BALLOON=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m +CONFIG_FS_DAX=y +CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FANOTIFY=y 
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=m @@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_PANIC_ON_OOPS=y -CONFIG_TIMER_STATS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y @@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 @@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m @@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_ZCRYPT=m +CONFIG_PKEY=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m +CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRC7=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index fbc6542aaf59..653d72bcc007 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -31,6 +31,7 @@ CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y +# CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_SQ=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_BSD_DISKLABEL=y @@ -86,6 +90,8 @@ CONFIG_UNIX=y CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m +CONFIG_SMC=m +CONFIG_SMC_DIAG=m CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_DNS_RESOLVER=y +CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m @@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CONNECTOR=y +CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=m +CONFIG_BLK_DEV_RAM_DAX=y CONFIG_VIRTIO_BLK=y CONFIG_ENCLOSURE_SERVICES=m +CONFIG_GENWQE=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y @@ -437,6 +446,8 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y # CONFIG_NET_VENDOR_NATSEMI is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -447,7 +458,6 @@ CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -466,6 +476,7 
@@ CONFIG_DIAG288_WATCHDOG=m CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m CONFIG_VIRTIO_BALLOON=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m +CONFIG_FS_DAX=y +CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=m @@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_PANIC_ON_OOPS=y -CONFIG_TIMER_STATS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y @@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 @@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m @@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m @@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m +CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRC7=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index e23d97c13735..afa46a7406ea 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=2 # CONFIG_HOTPLUG_CPU is not set CONFIG_HZ_100=y +# CONFIG_ARCH_RANDOM is not set # CONFIG_COMPACTION is not set # CONFIG_MIGRATION is not set +# CONFIG_BOUNCE is not set # CONFIG_CHECK_STACK is not set # CONFIG_CHSC_SCH is not set # CONFIG_SCM_BUS is not set @@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_FC_ATTRS=y CONFIG_ZFCP=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_HVC_IUCV is not set +# CONFIG_HW_RANDOM_S390 is not set CONFIG_RAW_DRIVER=y # CONFIG_SCLP_ASYNC is not set # CONFIG_HMC_DRV is not set @@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y # CONFIG_INOTIFY_USER is not set CONFIG_CONFIGFS_FS=y # CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 97189dbaf34b..20244a38c886 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y +# CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -108,7 +109,6 @@ CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=y CONFIG_MD=y CONFIG_MD_LINEAR=m 
-CONFIG_MD_RAID0=m CONFIG_MD_MULTIPATH=m CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=m @@ -131,6 +131,7 @@ CONFIG_TUN=m CONFIG_VIRTIO_NET=y # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set CONFIG_DEVKMEM=y @@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y CONFIG_DETECT_HUNG_TASK=y CONFIG_PANIC_ON_OOPS=y -CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y @@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_RCU_TRACE=y CONFIG_LATENCYTOP=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_KPROBES_SANITY_TEST=y @@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m @@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_ZCRYPT=m CONFIG_PKEY=m +CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA512_S390=m From cd1997f6c11483da819a7719aa013093b8003743 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 8 Jun 2017 19:06:29 +1000 Subject: [PATCH 085/436] net: s390: fix up for "Fix inconsistent teardown and release of private netdev state" Signed-off-by: Stephen Rothwell Signed-off-by: David S. Miller --- drivers/s390/net/netiucv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index dba94b486f05..fa732bd86729 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev) privptr->conn = NULL; privptr->fsm = NULL; /* privptr gets freed by free_netdev() */ } - free_netdev(dev); } /** @@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev) dev->mtu = NETIUCV_MTU_DEFAULT; dev->min_mtu = 576; dev->max_mtu = NETIUCV_MTU_MAX; - dev->destructor = netiucv_free_netdevice; + dev->needs_free_netdev = true; + dev->priv_destructor = netiucv_free_netdevice; dev->hard_header_len = NETIUCV_HDRLEN; dev->addr_len = 0; dev->type = ARPHRD_SLIP; From ed66e50d9587fc0bb032e276a2563c0068a5b63a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Jun 2017 10:16:05 -0400 Subject: [PATCH 086/436] hsi: Fix build regression due to netdev destructor fix. > ../drivers/hsi/clients/ssi_protocol.c:1069:5: error: 'struct net_device' has no member named 'destructor' Reported-by: Mark Brown Reported-by: Stephen Rothwell Signed-off-by: David S. 
Miller --- drivers/hsi/clients/ssi_protocol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 26b05106f0d3..93d28c0ec8bf 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev) dev->addr_len = 1; dev->tx_queue_len = SSIP_TXQUEUE_LEN; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &phonet_header_ops; } From 85eac2ba35a2dbfbdd5767c7447a4af07444a5b4 Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Wed, 7 Jun 2017 15:14:29 +0200 Subject: [PATCH 087/436] decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb Verify that the length of the socket buffer is sufficient to cover the entire nlh->nlmsg_len field before accessing that field for further input sanitization. If the client only supplies 1-3 bytes of data in sk_buff, then nlh->nlmsg_len remains partially uninitialized and contains leftover memory from the corresponding kernel allocation. Operating on such data may result in indeterminate evaluation of the nlmsg_len < sizeof(*nlh) expression. The bug was discovered by a runtime instrumentation designed to detect use of uninitialized memory in the kernel. The patch prevents this and other similar tools (e.g. KMSAN) from flagging this behavior in the future. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. Miller --- net/decnet/netfilter/dn_rtmsg.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 1ed81ac6dd1a..26e020e9d415 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) + if (skb->len < sizeof(nlh->nlmsg_len) || + nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) return; if (!netlink_capable(skb, CAP_NET_ADMIN)) From 19d90ece81da802207a9b91ce95a29fbdc40626e Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Wed, 7 Jun 2017 15:51:15 +0200 Subject: [PATCH 088/436] net: emac: fix reset timeout with AR8035 phy This patch fixes a problem where the AR8035 PHY can't be detected on an Cisco Meraki MR24, if the ethernet cable is not connected on boot. Russell Senior provided steps to reproduce the issue: |Disconnect ethernet cable, apply power, wait until device has booted, |plug in ethernet, check for interfaces, no eth0 is listed. | |This appears to be a problem during probing of the AR8035 Phy chip. |When ethernet has no link, the phy detection fails, and eth0 is not |created. Plugging ethernet later has no effect, because there is no |interface as far as the kernel is concerned. The relevant part of |the boot log looks like this: |this is the failing case: | |[ 0.876611] /plb/opb/emac-rgmii@ef601500: input 0 in RGMII mode |[ 0.882532] /plb/opb/ethernet@ef600c00: reset timeout |[ 0.888546] /plb/opb/ethernet@ef600c00: can't find PHY! |and the succeeding case: | |[ 0.876672] /plb/opb/emac-rgmii@ef601500: input 0 in RGMII mode |[ 0.883952] eth0: EMAC-0 /plb/opb/ethernet@ef600c00, MAC 00:01:.. |[ 0.890822] eth0: found Atheros 8035 Gigabit Ethernet PHY (0x01) Based on the comment and the commit message of commit 23fbb5a87c56 ("emac: Fix EMAC soft reset on 460EX/GT"). 
This is because the AR8035 PHY doesn't provide the TX Clock, if the ethernet cable is not attached. This causes the reset to timeout and the PHY detection code in emac_init_phy() is unable to detect the AR8035 PHY. As a result, the emac driver bails out early and the user left with no ethernet. In order to stay compatible with existing configurations, the driver tries the current reset approach at first. Only if the first attempt timed out, it does perform one more retry with the clock temporarily switched to the internal source for just the duration of the reset. LEDE-Bug: #687 Cc: Chris Blake Reported-by: Russell Senior Fixes: 23fbb5a87c56e98 ("emac: Fix EMAC soft reset on 460EX/GT") Signed-off-by: Christian Lamparter Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/emac/core.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 508923f39ccf..b6e871bfb659 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; int n = 20; + bool __maybe_unused try_internal_clock = false; DBG(dev, "reset" NL); @@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE +do_retry: /* * PPC460EX/GT Embedded Processor Advanced User's Manual * section 28.10.1 Mode Register 0 (EMACx_MR0) states: @@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev) * of the EMAC. If none is present, select the internal clock * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). * After a soft reset, select the external clock. + * + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the + * ethernet cable is not attached. This causes the reset to timeout + * and the PHY detection code in emac_init_phy() is unable to + * communicate and detect the AR8035-A PHY. As a result, the emac + * driver bails out early and the user has no ethernet. + * In order to stay compatible with existing configurations, the + * driver will temporarily switch to the internal clock, after + * the first reset fails. */ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: select internal loop clock before reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, 0, SDR0_ETH_CFG_ECS << dev->cell_index); @@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev) #ifdef CONFIG_PPC_DCR_NATIVE if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (!n && !try_internal_clock) { + /* first attempt has timed out. */ + n = 20; + try_internal_clock = true; + goto do_retry; + } + + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: restore external clock source after reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, SDR0_ETH_CFG_ECS << dev->cell_index, 0); From 9065bc386fcf52dee8f697450ddeb788e1bd514c Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Wed, 7 Jun 2017 15:51:16 +0200 Subject: [PATCH 089/436] net: emac: fix and unify emac_mdio functions emac_mdio_read_link() was not copying the requested phy settings back into the emac driver's own phy api. 
This has caused a link speed mismatch issue for the AR8035 as the emac driver kept trying to connect with 10/100MBps on a 1GBit/s link. This patch also unifies shared code between emac_setup_aneg() and emac_mdio_setup_forced(). And furthermore it removes a chunk of emac_mdio_init_phy(), that was copying the same data into itself. Signed-off-by: Christian Lamparter Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/emac/core.c | 41 ++++++++++++---------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index b6e871bfb659..259e69a52ec5 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2478,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus) return emac_reset(dev); } +static int emac_mdio_phy_start_aneg(struct mii_phy *phy, + struct phy_device *phy_dev) +{ + phy_dev->autoneg = phy->autoneg; + phy_dev->speed = phy->speed; + phy_dev->duplex = phy->duplex; + phy_dev->advertising = phy->advertising; + return phy_start_aneg(phy_dev); +} + static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) { struct net_device *ndev = phy->dev; struct emac_instance *dev = netdev_priv(ndev); - dev->phy.autoneg = AUTONEG_ENABLE; - dev->phy.speed = SPEED_1000; - dev->phy.duplex = DUPLEX_FULL; - dev->phy.advertising = advertise; phy->autoneg = AUTONEG_ENABLE; - phy->speed = dev->phy.speed; - phy->duplex = dev->phy.duplex; phy->advertising = advertise; - return phy_start_aneg(dev->phy_dev); + return emac_mdio_phy_start_aneg(phy, dev->phy_dev); } static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) @@ -2499,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) struct net_device *ndev = phy->dev; struct emac_instance *dev = netdev_priv(ndev); - dev->phy.autoneg = AUTONEG_DISABLE; - dev->phy.speed = speed; - dev->phy.duplex = fd; phy->autoneg = AUTONEG_DISABLE; phy->speed = speed; phy->duplex = fd; - return phy_start_aneg(dev->phy_dev); + return emac_mdio_phy_start_aneg(phy, dev->phy_dev); } static int emac_mdio_poll_link(struct mii_phy *phy) @@ -2527,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy) { struct net_device *ndev = phy->dev; struct emac_instance *dev = netdev_priv(ndev); + struct phy_device *phy_dev = dev->phy_dev; int res; - res = phy_read_status(dev->phy_dev); + res = phy_read_status(phy_dev); if (res) return res; - dev->phy.speed = phy->speed; - dev->phy.duplex = phy->duplex; - dev->phy.pause = phy->pause; - dev->phy.asym_pause = phy->asym_pause; + phy->speed = phy_dev->speed; + phy->duplex = phy_dev->duplex; + phy->pause = phy_dev->pause; + phy->asym_pause = phy_dev->asym_pause; return 0; } @@ -2546,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy) struct emac_instance *dev = netdev_priv(ndev); phy_start(dev->phy_dev); - dev->phy.autoneg = phy->autoneg; - dev->phy.speed = phy->speed; - dev->phy.duplex = phy->duplex; - dev->phy.advertising = phy->advertising; - dev->phy.pause = phy->pause; - dev->phy.asym_pause = phy->asym_pause; - return phy_init_hw(dev->phy_dev); } From c164772dd32343ee19e276ce0137830557517834 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Jun 2017 10:50:18 -0400 Subject: [PATCH 090/436] Revert "decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb" This reverts commit 85eac2ba35a2dbfbdd5767c7447a4af07444a5b4. 
There is an updated version of this fix which we should use instead. Signed-off-by: David S. Miller --- net/decnet/netfilter/dn_rtmsg.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 26e020e9d415..1ed81ac6dd1a 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,9 +102,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (skb->len < sizeof(nlh->nlmsg_len) || - nlh->nlmsg_len < sizeof(*nlh) || - skb->len < nlh->nlmsg_len) + if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) return; if (!netlink_capable(skb, CAP_NET_ADMIN)) From dd0da17b209ed91f39872766634ca967c170ada1 Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Wed, 7 Jun 2017 16:14:29 +0200 Subject: [PATCH 091/436] decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb Verify that the length of the socket buffer is sufficient to cover the nlmsghdr structure before accessing the nlh->nlmsg_len field for further input sanitization. If the client only supplies 1-3 bytes of data in sk_buff, then nlh->nlmsg_len remains partially uninitialized and contains leftover memory from the corresponding kernel allocation. Operating on such data may result in indeterminate evaluation of the nlmsg_len < sizeof(*nlh) expression. The bug was discovered by a runtime instrumentation designed to detect use of uninitialized memory in the kernel. The patch prevents this and other similar tools (e.g. KMSAN) from flagging this behavior in the future. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. Miller --- net/decnet/netfilter/dn_rtmsg.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 1ed81ac6dd1a..aa8ffecc46a4 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) + if (skb->len < sizeof(*nlh) || + nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) return; if (!netlink_capable(skb, CAP_NET_ADMIN)) From 0eed9cf58446b28b233388b7f224cbca268b6986 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Wed, 7 Jun 2017 21:00:33 +0300 Subject: [PATCH 092/436] net: Zero ifla_vf_info in rtnl_fill_vfinfo() Some of the structure's fields are not initialized by the rtnetlink. If driver doesn't set those in ndo_get_vf_config(), they'd leak memory to user. Signed-off-by: Yuval Mintz CC: Michal Schmidt Reviewed-by: Greg Rose Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9e2c0a7cb325..5e61456f6bc7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1124,6 +1124,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct ifla_vf_mac vf_mac; struct ifla_vf_info ivi; + memset(&ivi, 0, sizeof(ivi)); + /* Not all SR-IOV capable drivers support the * spoofcheck and "RSS query enable" query. 
Preset to * -1 so the user space tool can detect that the driver @@ -1132,7 +1134,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, ivi.spoofchk = -1; ivi.rss_query_en = -1; ivi.trusted = -1; - memset(ivi.mac, 0, sizeof(ivi.mac)); /* The default value for VF link state is "auto" * IFLA_VF_LINK_STATE_AUTO which equals zero */ From 8397ed36b7c585f8d3e06c431f4137309124f78f Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 7 Jun 2017 12:26:23 -0600 Subject: [PATCH 093/436] net: ipv6: Release route when device is unregistering Roopa reported attempts to delete a bond device that is referenced in a multipath route is hanging: $ ifdown bond2 # ifupdown2 command that deletes virtual devices unregister_netdevice: waiting for bond2 to become free. Usage count = 2 Steps to reproduce: echo 1 > /proc/sys/net/ipv6/conf/all/ignore_routes_with_linkdown ip link add dev bond12 type bond ip link add dev bond13 type bond ip addr add 2001:db8:2::0/64 dev bond12 ip addr add 2001:db8:3::0/64 dev bond13 ip route add 2001:db8:33::0/64 nexthop via 2001:db8:2::2 nexthop via 2001:db8:3::2 ip link del dev bond12 ip link del dev bond13 The root cause is the recent change to keep routes on a linkdown. Update the check to detect when the device is unregistering and release the route for that case. Fixes: a1a22c12060e4 ("net: ipv6: Keep nexthop of multipath route on admin down") Reported-by: Roopa Prabhu Signed-off-by: David Ahern Acked-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/netdevice.h | 5 +++++ net/ipv6/route.c | 1 + 2 files changed, 6 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ab7ca3fdc495..846193dfb0ac 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4262,6 +4262,11 @@ static inline const char *netdev_name(const struct net_device *dev) return dev->name; } +static inline bool netdev_unregistering(const struct net_device *dev) +{ + return dev->reg_state == NETREG_UNREGISTERING; +} + static inline const char *netdev_reg_state(const struct net_device *dev) { switch (dev->reg_state) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index dc61b0b5e64e..7cebd954d5bb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg) if ((rt->dst.dev == dev || !dev) && rt != adn->net->ipv6.ip6_null_entry && (rt->rt6i_nsiblings == 0 || + (dev && netdev_unregistering(dev)) || !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) return -1; From 95989c46d2a156365867b1d795fdefce71bce378 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Thu, 8 Jun 2017 08:23:07 -0700 Subject: [PATCH 094/436] xfs: fix spurious spin_is_locked() assert failures on non-smp kernels The 0-day kernel test robot reports assertion failures on !CONFIG_SMP kernels due to failed spin_is_locked() checks. As it turns out, spin_is_locked() is hardcoded to return zero on !CONFIG_SMP kernels and so this function cannot be relied on to verify spinlock state in this configuration. To avoid this problem, replace the associated asserts with lockdep variants that do the right thing regardless of kernel configuration. Drop the one assert that checks for an unlocked lock as there is no suitable lockdep variant for that case. This moves the spinlock checks from XFS debug code to lockdep, but generally provides the same level of protection. Reported-by: kbuild test robot Signed-off-by: Brian Foster Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/xfs/xfs_buf.c | 2 +- fs/xfs/xfs_icache.c | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 07b77b73b024..16d6a578fc16 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -117,7 +117,7 @@ static inline void __xfs_buf_ioacct_dec( struct xfs_buf *bp) { - ASSERT(spin_is_locked(&bp->b_lock)); + lockdep_assert_held(&bp->b_lock); if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index f61c84f8e31a..990210fcb9c3 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -66,7 +66,6 @@ xfs_inode_alloc( XFS_STATS_INC(mp, vn_active); ASSERT(atomic_read(&ip->i_pincount) == 0); - ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(!xfs_isiflocked(ip)); ASSERT(ip->i_ino == 0); @@ -190,7 +189,7 @@ xfs_perag_set_reclaim_tag( { struct xfs_mount *mp = pag->pag_mount; - ASSERT(spin_is_locked(&pag->pag_ici_lock)); + lockdep_assert_held(&pag->pag_ici_lock); if (pag->pag_ici_reclaimable++) return; @@ -212,7 +211,7 @@ xfs_perag_clear_reclaim_tag( { struct xfs_mount *mp = pag->pag_mount; - ASSERT(spin_is_locked(&pag->pag_ici_lock)); + lockdep_assert_held(&pag->pag_ici_lock); if (--pag->pag_ici_reclaimable) return; From fbd4c7e768f1719bea340e40148800279d230922 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 7 Jun 2017 15:53:47 -0700 Subject: [PATCH 095/436] netvsc: fix rcu dereference warning from ethtool The ethtool info command calls the netvsc get_sset_count with RTNL but not with RCU. Which causes warning: drivers/net/hyperv/netvsc_drv.c:1010 suspicious rcu_dereference_check() usage! Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 4421a6d00375..d93e4da25fd2 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1028,7 +1028,7 @@ static const struct { static int netvsc_get_sset_count(struct net_device *dev, int string_set) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); if (!nvdev) return -ENODEV; From a5ecd43992a7cd9f91d5f98b0082ae44df5e543c Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 7 Jun 2017 15:53:48 -0700 Subject: [PATCH 096/436] netvsc: fix net poll mode The ndo_poll_controller function needs to schedule NAPI to pick up arriving packets and send completions. Otherwise no data will ever be received. For simple case of netconsole, it also will allow send completions to happen. Without this netpoll will eventually get stuck. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d93e4da25fd2..252e5d52d17e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1158,11 +1158,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, } #ifdef CONFIG_NET_POLL_CONTROLLER -static void netvsc_poll_controller(struct net_device *net) +static void netvsc_poll_controller(struct net_device *dev) { - /* As netvsc_start_xmit() works synchronous we don't have to - * trigger anything here. 
- */ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev; + int i; + + rcu_read_lock(); + ndev = rcu_dereference(ndc->nvdev); + if (ndev) { + for (i = 0; i < ndev->num_chn; i++) { + struct netvsc_channel *nvchan = &ndev->chan_table[i]; + + napi_schedule(&nvchan->napi); + } + } + rcu_read_unlock(); } #endif From 4f19c0d8070cd4aa8e85bbf8a19d9ef5bef77c90 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 7 Jun 2017 15:53:49 -0700 Subject: [PATCH 097/436] netvsc: move filter setting to rndis_device The work queue and handling of network filter parameters should be in rndis_device. This gets rid of warning from RCU checks, eliminates a race and cleans up code. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 5 +++-- drivers/net/hyperv/netvsc_drv.c | 33 ++----------------------------- drivers/net/hyperv/rndis_filter.c | 30 +++++++++++++++++++++++++++- 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 262b2ea576a3..6066f1bcaf2d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -171,6 +171,8 @@ struct rndis_device { spinlock_t request_lock; struct list_head req_list; + struct work_struct mcast_work; + u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; u16 ind_table[ITAB_NUM]; @@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *info); +void rndis_filter_update(struct netvsc_device *nvdev); void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *nvdev); int rndis_filter_set_rss_param(struct rndis_device *rdev, @@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev, struct vmbus_channel *channel, void *data, u32 buflen); -int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); @@ -696,7 +698,6 @@ struct net_device_context { /* list protection */ spinlock_t lock; - struct work_struct work; u32 msg_enable; /* debug level */ u32 tx_checksum_mask; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 252e5d52d17e..82d6c022ca85 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -56,37 +56,12 @@ static int debug = -1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void do_set_multicast(struct work_struct *w) -{ - struct net_device_context *ndevctx = - container_of(w, struct net_device_context, work); - struct hv_device *device_obj = ndevctx->device_ctx; - struct net_device *ndev = hv_get_drvdata(device_obj); - struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev); - struct rndis_device *rdev; - - if (!nvdev) - return; - - rdev = nvdev->extension; - if (rdev == NULL) - return; - - if (ndev->flags & IFF_PROMISC) - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_PROMISCUOUS); - else - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_BROADCAST | - NDIS_PACKET_TYPE_ALL_MULTICAST | - NDIS_PACKET_TYPE_DIRECTED); -} - static void netvsc_set_multicast_list(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - 
schedule_work(&net_device_ctx->work); + rndis_filter_update(nvdev); } static int netvsc_open(struct net_device *net) @@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net) netif_tx_disable(net); - /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ - cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(nvdev); if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); @@ -1563,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev, hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); - INIT_WORK(&net_device_ctx->work, do_set_multicast); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); @@ -1633,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev) netif_device_detach(net); cancel_delayed_work_sync(&ndev_ctx->dwork); - cancel_work_sync(&ndev_ctx->work); /* * Call to the vsc driver to let it know that the device is being diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index f9d5b0b8209a..cb79cd081f42 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -31,6 +31,7 @@ #include "hyperv_net.h" +static void rndis_set_multicast(struct work_struct *w); #define RNDIS_EXT_LEN PAGE_SIZE struct rndis_request { @@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void) spin_lock_init(&device->request_lock); INIT_LIST_HEAD(&device->req_list); + INIT_WORK(&device->mcast_work, rndis_set_multicast); device->state = RNDIS_DEV_UNINITIALIZED; @@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev) return ret; } -int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) +static int rndis_filter_set_packet_filter(struct rndis_device *dev, + u32 new_filter) { struct rndis_request *request; struct rndis_set_request *set; @@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) return ret; } +static void rndis_set_multicast(struct work_struct *w) +{ + struct rndis_device *rdev + = container_of(w, struct rndis_device, mcast_work); + + if (rdev->ndev->flags & IFF_PROMISC) + rndis_filter_set_packet_filter(rdev, + NDIS_PACKET_TYPE_PROMISCUOUS); + else + rndis_filter_set_packet_filter(rdev, + NDIS_PACKET_TYPE_BROADCAST | + NDIS_PACKET_TYPE_ALL_MULTICAST | + NDIS_PACKET_TYPE_DIRECTED); +} + +void rndis_filter_update(struct netvsc_device *nvdev) +{ + struct rndis_device *rdev = nvdev->extension; + + schedule_work(&rdev->mcast_work); +} + static int rndis_filter_init_device(struct rndis_device *dev) { struct rndis_request *request; @@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev) if (dev->state != RNDIS_DEV_DATAINITIALIZED) return 0; + /* Make sure rndis_set_multicast doesn't re-enable filter! */ + cancel_work_sync(&dev->mcast_work); + ret = rndis_filter_set_packet_filter(dev, 0); if (ret == -ENODEV) ret = 0; From 0bed865060836dc5f26b68052de0a504d66d9fae Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Jun 2017 11:51:59 -0400 Subject: [PATCH 098/436] net: Fix build regression in rtl8723bs staging driver. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c: In function ‘rtw_cfg80211_add_monitor_if’: drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c:2670:10: error: ‘struct net_device’ has no member named ‘destructor’ mon_ndev->destructor = rtw_ndev_destructor; ^ Signed-off-by: David S. Miller --- drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 3 ++- drivers/staging/rtl8723bs/os_dep/os_intfs.c | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 36c3189fc4b7..bd4352fe2de3 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; strncpy(mon_ndev->name, name, IFNAMSIZ); mon_ndev->name[IFNAMSIZ - 1] = 0; - mon_ndev->destructor = rtw_ndev_destructor; + mon_ndev->needs_free_netdev = true; + mon_ndev->priv_destructor = rtw_ndev_destructor; mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index f83cfc76505c..021589913681 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev) if (ndev->ieee80211_ptr) kfree((u8 *)ndev->ieee80211_ptr); - - free_netdev(ndev); } void rtw_dev_unload(struct adapter *padapter) From 0db47e3d323411beeb6ea97f2c4d19395c91fd8b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 8 Jun 2017 09:54:24 +0200 Subject: [PATCH 099/436] ila_xlat: add missing hash secret initialization While discussing the possible merits of clang warning about unused initialized functions, I found one function that was clearly meant to be called but never actually is. __ila_hash_secret_init() initializes the hash value for the ila locator, apparently this is intended to prevent hash collision attacks, but this ends up being a read-only zero constant since there is no caller. I could find no indication of why it was never called, the earliest patch submission for the module already was like this. If my interpretation is right, we certainly want to backport the patch to stable kernels as well. I considered adding it to the ila_xlat_init callback, but for best effect the random data is read as late as possible, just before it is first used. The underlying net_get_random_once() is already highly optimized to avoid overhead when called frequently. Fixes: 7f00feaf1076 ("ila: Add generic ILA translation facility") Cc: stable@vger.kernel.org Link: https://www.spinics.net/lists/kernel/msg2527243.html Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- net/ipv6/ila/ila_xlat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 2fd5ca151dcf..77f7f8c7d93d 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc) { u32 *v = (u32 *)loc.v32; + __ila_hash_secret_init(); return jhash_2words(v[0], v[1], hashrnd); } From f9f314f323951a33d8b4a4f63f7d04b7f3bc0603 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Thu, 8 Jun 2017 15:37:44 -0300 Subject: [PATCH 100/436] [media] media/cec.h: use IS_REACHABLE instead of IS_ENABLED Fix messages like this: adv7842.c:(.text+0x2edadd): undefined reference to `cec_unregister_adapter' when CEC_CORE=m but the driver including media/cec.h is built-in. In that case the static inlines provided in media/cec.h should be used by that driver. Reported-by: Randy Dunlap Reported-by: kbuild test robot Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/cec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/media/cec.h b/include/media/cec.h index bfa88d4d67e1..201f060978da 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -206,7 +206,7 @@ static inline bool cec_is_sink(const struct cec_adapter *adap) #define cec_phys_addr_exp(pa) \ ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf -#if IS_ENABLED(CONFIG_CEC_CORE) +#if IS_REACHABLE(CONFIG_CEC_CORE) struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, void *priv, const char *name, u32 caps, u8 available_las); int cec_register_adapter(struct cec_adapter *adap, struct device *parent); From daa6630a310fe2ad90ce5f7d2d196cd0353ef4fa Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 8 Jun 2017 10:37:45 +0200 Subject: [PATCH 101/436] openvswitch: warn about missing first netlink attribute The first netlink attribute (value 0) must always be defined as none/unspec. Because we cannot change an existing UAPI, I add a comment to point the mistake and avoid to propagate it in a new ovs API in the future. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 61b7d36dfe34..156ee4cab82e 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -343,6 +343,7 @@ enum ovs_key_attr { #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) enum ovs_tunnel_key_attr { + /* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */ OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */ From c7c550670afda2e16f9e2d06a1473885312eb6b5 Mon Sep 17 00:00:00 2001 From: Thibaut Collet Date: Thu, 8 Jun 2017 11:18:11 +0200 Subject: [PATCH 102/436] bonding: fix 802.3ad support for 5G and 50G speeds This patch adds [5|50] Gbps enum definition, and fixes aggregated bandwidth calculation based on above slave links. Fixes: c9a70d43461d ("net-next: ethtool: Added port speed macros.") Signed-off-by: Thibaut Collet Signed-off-by: Nicolas Dichtel Acked-by: Andy Gospodarek Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_3ad.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index b44a6aeb346d..d1b09be63ba4 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -90,10 +90,12 @@ enum ad_link_speed_type { AD_LINK_SPEED_100MBPS, AD_LINK_SPEED_1000MBPS, AD_LINK_SPEED_2500MBPS, + AD_LINK_SPEED_5000MBPS, AD_LINK_SPEED_10000MBPS, AD_LINK_SPEED_20000MBPS, AD_LINK_SPEED_25000MBPS, AD_LINK_SPEED_40000MBPS, + AD_LINK_SPEED_50000MBPS, AD_LINK_SPEED_56000MBPS, AD_LINK_SPEED_100000MBPS, }; @@ -259,10 +261,12 @@ static inline int __check_agg_selection_timer(struct port *port) * %AD_LINK_SPEED_100MBPS, * %AD_LINK_SPEED_1000MBPS, * %AD_LINK_SPEED_2500MBPS, + * %AD_LINK_SPEED_5000MBPS, * %AD_LINK_SPEED_10000MBPS * %AD_LINK_SPEED_20000MBPS * %AD_LINK_SPEED_25000MBPS * %AD_LINK_SPEED_40000MBPS + * %AD_LINK_SPEED_50000MBPS * %AD_LINK_SPEED_56000MBPS * %AD_LINK_SPEED_100000MBPS */ @@ -296,6 +300,10 @@ static u16 __get_link_speed(struct port *port) speed = AD_LINK_SPEED_2500MBPS; break; + case SPEED_5000: + speed = AD_LINK_SPEED_5000MBPS; + break; + case SPEED_10000: speed = AD_LINK_SPEED_10000MBPS; break; @@ -312,6 +320,10 @@ static u16 __get_link_speed(struct port *port) speed = AD_LINK_SPEED_40000MBPS; break; + case SPEED_50000: + speed = AD_LINK_SPEED_50000MBPS; + break; + case SPEED_56000: speed = AD_LINK_SPEED_56000MBPS; break; @@ -707,6 +719,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) case AD_LINK_SPEED_2500MBPS: bandwidth = nports * 2500; break; + case AD_LINK_SPEED_5000MBPS: + bandwidth = nports * 5000; + break; case AD_LINK_SPEED_10000MBPS: bandwidth = nports * 10000; break; @@ -719,6 +734,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) case AD_LINK_SPEED_40000MBPS: bandwidth = nports * 40000; break; + case AD_LINK_SPEED_50000MBPS: + bandwidth = nports * 50000; + break; case AD_LINK_SPEED_56000MBPS: bandwidth = nports * 56000; break; From 3fcd64cfa0e9cb72b99aaba5c6bc13af9c03417f Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 8 Jun 2017 11:18:12 +0200 Subject: [PATCH 103/436] bonding: fix 802.3ad support for 14G speed This patch adds 14 Gbps enum definition, and fixes aggregated bandwidth calculation based on above slave links. Fixes: 0d7e2d2166f6 ("IB/ipoib: add get_link_ksettings in ethtool") Signed-off-by: Nicolas Dichtel Acked-by: Andy Gospodarek Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_3ad.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index d1b09be63ba4..e5386ab706ec 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -92,6 +92,7 @@ enum ad_link_speed_type { AD_LINK_SPEED_2500MBPS, AD_LINK_SPEED_5000MBPS, AD_LINK_SPEED_10000MBPS, + AD_LINK_SPEED_14000MBPS, AD_LINK_SPEED_20000MBPS, AD_LINK_SPEED_25000MBPS, AD_LINK_SPEED_40000MBPS, @@ -263,6 +264,7 @@ static inline int __check_agg_selection_timer(struct port *port) * %AD_LINK_SPEED_2500MBPS, * %AD_LINK_SPEED_5000MBPS, * %AD_LINK_SPEED_10000MBPS + * %AD_LINK_SPEED_14000MBPS, * %AD_LINK_SPEED_20000MBPS * %AD_LINK_SPEED_25000MBPS * %AD_LINK_SPEED_40000MBPS @@ -308,6 +310,10 @@ static u16 __get_link_speed(struct port *port) speed = AD_LINK_SPEED_10000MBPS; break; + case SPEED_14000: + speed = AD_LINK_SPEED_14000MBPS; + break; + case SPEED_20000: speed = AD_LINK_SPEED_20000MBPS; break; @@ -725,6 +731,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) case AD_LINK_SPEED_10000MBPS: bandwidth = nports * 10000; break; + case AD_LINK_SPEED_14000MBPS: + bandwidth = nports * 14000; + break; case AD_LINK_SPEED_20000MBPS: bandwidth = nports * 20000; break; From 297fb414d0d190ca82bf0b46fb19d7fda1598737 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 8 Jun 2017 11:18:13 +0200 Subject: [PATCH 104/436] ethtool.h: remind to update 802.3ad when adding new speeds Each time a new speed is added, the bonding 802.3ad isn't updated. Add a comment to remind the developer to update this driver. Signed-off-by: Nicolas Dichtel Acked-by: Andy Gospodarek Signed-off-by: David S. Miller --- include/uapi/linux/ethtool.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index d179d7767f51..7d4a594d5d58 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1486,8 +1486,10 @@ enum ethtool_link_mode_bit_indices { * it was forced up into this mode or autonegotiated. */ -/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */ -/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */ +/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. + * Update drivers/net/phy/phy.c:phy_speed_to_str() and + * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values. + */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 From 78a5a93c1eeb4e6933d1f62b33e5496d53b46c5a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 8 Jun 2017 19:06:25 +0200 Subject: [PATCH 105/436] bpf, tests: fix endianness selection I noticed that test_l4lb was failing in selftests: # ./test_progs test_pkt_access:PASS:ipv4 77 nsec test_pkt_access:PASS:ipv6 44 nsec test_xdp:PASS:ipv4 2933 nsec test_xdp:PASS:ipv6 1500 nsec test_l4lb:PASS:ipv4 377 nsec test_l4lb:PASS:ipv6 544 nsec test_l4lb:FAIL:stats 6297600000 200000 test_tcp_estats:PASS: 0 nsec Summary: 7 PASSED, 1 FAILED Tracking down the issue actually revealed that endianness selection in bpf_endian.h is broken when compiled with clang with bpf target. test_pkt_access.c, test_l4lb.c is compiled with __BYTE_ORDER as __BIG_ENDIAN, test_xdp.c as __LITTLE_ENDIAN! 
test_l4lb noticeably fails, because the test accounts bytes via bpf_ntohs(ip6h->payload_len) and bpf_ntohs(iph->tot_len), and compares them against a defined value and given a wrong endianness, the test outcome is different, of course. Turns out that there are actually two bugs: i) when we do __BYTE_ORDER comparison with __LITTLE_ENDIAN/__BIG_ENDIAN, then depending on the include order we see different outcomes. Reason is that __BYTE_ORDER is undefined due to missing endian.h include. Before we include the asm/byteorder.h (e.g. through linux/in.h), then __BYTE_ORDER equals __LITTLE_ENDIAN since both are undefined, after the include which correctly pulls in linux/byteorder/little_endian.h, __LITTLE_ENDIAN is defined, but given __BYTE_ORDER is still undefined, we match on __BYTE_ORDER equals to __BIG_ENDIAN since __BIG_ENDIAN is also undefined at that point, sigh. ii) But even that would be wrong, since when compiling the test cases with clang, one can select between bpfeb and bpfel targets for cross compilation. Hence, we can also not rely on what the system's endian.h provides, but we need to look at the compiler's defined endianness. The compiler defines __BYTE_ORDER__, and we can match __ORDER_LITTLE_ENDIAN__ and __ORDER_BIG_ENDIAN__, which also reflects targets bpf (native), bpfel, bpfeb correctly, thus really only rely on that. After patch: # ./test_progs test_pkt_access:PASS:ipv4 74 nsec test_pkt_access:PASS:ipv6 42 nsec test_xdp:PASS:ipv4 2340 nsec test_xdp:PASS:ipv6 1461 nsec test_l4lb:PASS:ipv4 400 nsec test_l4lb:PASS:ipv6 530 nsec test_tcp_estats:PASS: 0 nsec Summary: 7 PASSED, 0 FAILED Fixes: 43bcf707ccdc ("bpf: fix _htons occurences in test_progs") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/bpf_endian.h | 41 +++++++++++++++++------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h index 19d0604f8694..487cbfb89beb 100644 --- a/tools/testing/selftests/bpf/bpf_endian.h +++ b/tools/testing/selftests/bpf/bpf_endian.h @@ -1,23 +1,42 @@ #ifndef __BPF_ENDIAN__ #define __BPF_ENDIAN__ -#include +#include -#if __BYTE_ORDER == __LITTLE_ENDIAN -# define __bpf_ntohs(x) __builtin_bswap16(x) -# define __bpf_htons(x) __builtin_bswap16(x) -#elif __BYTE_ORDER == __BIG_ENDIAN -# define __bpf_ntohs(x) (x) -# define __bpf_htons(x) (x) +/* LLVM's BPF target selects the endianness of the CPU + * it compiles on, or the user specifies (bpfel/bpfeb), + * respectively. The used __BYTE_ORDER__ is defined by + * the compiler, we cannot rely on __BYTE_ORDER from + * libc headers, since it doesn't reflect the actual + * requested byte order. + * + * Note, LLVM's BPF target has different __builtin_bswapX() + * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE + * in bpfel and bpfeb case, which means below, that we map + * to cpu_to_be16(). We could use it unconditionally in BPF + * case, but better not rely on it, so that this header here + * can be used from application and BPF program side, which + * use different targets. 
+ */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +# define __bpf_constant_ntohs(x) ___constant_swab16(x) +# define __bpf_constant_htons(x) ___constant_swab16(x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +# define __bpf_constant_ntohs(x) (x) +# define __bpf_constant_htons(x) (x) #else -# error "Fix your __BYTE_ORDER?!" +# error "Fix your compiler's __BYTE_ORDER__?!" #endif #define bpf_htons(x) \ (__builtin_constant_p(x) ? \ - __constant_htons(x) : __bpf_htons(x)) + __bpf_constant_htons(x) : __bpf_htons(x)) #define bpf_ntohs(x) \ (__builtin_constant_p(x) ? \ - __constant_ntohs(x) : __bpf_ntohs(x)) + __bpf_constant_ntohs(x) : __bpf_ntohs(x)) -#endif +#endif /* __BPF_ENDIAN__ */ From 097d3c9508dc58286344e4a22b300098cf0c1566 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 8 Jun 2017 11:31:11 -0600 Subject: [PATCH 106/436] net: vrf: Make add_fib_rules per network namespace flag Commit 1aa6c4f6b8cd8 ("net: vrf: Add l3mdev rules on first device create") adds the l3mdev FIB rule the first time a VRF device is created. However, it only creates the rule once and only in the namespace the first device is created - which may not be init_net. Fix by using the net_generic capability to make the add_fib_rules flag per network namespace. Fixes: 1aa6c4f6b8cd8 ("net: vrf: Add l3mdev rules on first device create") Reported-by: Petr Machata Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index d38f11d833fe..022c0b5f9844 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -36,12 +36,14 @@ #include #include #include +#include #define DRV_NAME "vrf" #define DRV_VERSION "1.0" #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ -static bool add_fib_rules = true; + +static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; @@ -1394,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); + bool *add_fib_rules; + struct net *net; int err; if (!data || !data[IFLA_VRF_TABLE]) @@ -1409,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, if (err) goto out; - if (add_fib_rules) { + net = dev_net(dev); + add_fib_rules = net_generic(net, vrf_net_id); + if (*add_fib_rules) { err = vrf_add_fib_rules(dev); if (err) { unregister_netdevice(dev); goto out; } - add_fib_rules = false; + *add_fib_rules = false; } out: @@ -1498,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = { .notifier_call = vrf_device_event, }; +/* Initialize per network namespace state */ +static int __net_init vrf_netns_init(struct net *net) +{ + bool *add_fib_rules = net_generic(net, vrf_net_id); + + *add_fib_rules = true; + + return 0; +} + +static struct pernet_operations vrf_net_ops __net_initdata = { + .init = vrf_netns_init, + .id = &vrf_net_id, + .size = sizeof(bool), +}; + static int __init vrf_init_module(void) { int rc; register_netdevice_notifier(&vrf_notifier_block); - rc = rtnl_link_register(&vrf_link_ops); + rc = register_pernet_subsys(&vrf_net_ops); if (rc < 0) goto error; + rc = rtnl_link_register(&vrf_link_ops); + if (rc < 0) { + unregister_pernet_subsys(&vrf_net_ops); + goto error; + } + return 0; error: From 
73d4e580ccc5c3e05cea002f18111f66c9c07034 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 2 Jun 2017 20:00:17 -0700 Subject: [PATCH 107/436] target: Fix kref->refcount underflow in transport_cmd_finish_abort This patch fixes a se_cmd->cmd_kref underflow during CMD_T_ABORTED when a fabric driver drops it's second reference from below the target_core_tmr.c based callers of transport_cmd_finish_abort(). Recently with the conversion of kref to refcount_t, this bug was manifesting itself as: [705519.601034] refcount_t: underflow; use-after-free. [705519.604034] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 20116.512 msecs [705539.719111] ------------[ cut here ]------------ [705539.719117] WARNING: CPU: 3 PID: 26510 at lib/refcount.c:184 refcount_sub_and_test+0x33/0x51 Since the original kref atomic_t based kref_put() didn't check for underflow and only invoked the final callback when zero was reached, this bug did not manifest in practice since all se_cmd memory is using preallocated tags. To address this, go ahead and propigate the existing return from transport_put_cmd() up via transport_cmd_finish_abort(), and change transport_cmd_finish_abort() + core_tmr_handle_tas_abort() callers to only do their local target_put_sess_cmd() if necessary. Reported-by: Bart Van Assche Tested-by: Bart Van Assche Cc: Mike Christie Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Himanshu Madhani Cc: Sagi Grimberg Cc: stable@vger.kernel.org # 3.14+ Tested-by: Gary Guo Tested-by: Chu Yuan Lin Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_internal.h | 2 +- drivers/target/target_core_tmr.c | 16 ++++++++-------- drivers/target/target_core_transport.c | 9 ++++++--- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 9ab7090f7c83..0912de7c0cf8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -136,7 +136,7 @@ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); -void transport_cmd_finish_abort(struct se_cmd *, int); +int transport_cmd_finish_abort(struct se_cmd *, int); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index dce1e1b47316..13f47bf4d16b 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) kfree(tmr); } -static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) +static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) { unsigned long flags; bool remove = true, send_tas; @@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) transport_send_task_abort(cmd); } - transport_cmd_finish_abort(cmd, remove); + return transport_cmd_finish_abort(cmd, remove); } static int target_check_cdb_and_preempt(struct list_head *list, @@ -184,8 +184,8 @@ void core_tmr_abort_task( cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - transport_cmd_finish_abort(se_cmd, true); - target_put_sess_cmd(se_cmd); + if (!transport_cmd_finish_abort(se_cmd, true)) + target_put_sess_cmd(se_cmd); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); @@ 
-281,8 +281,8 @@ static void core_tmr_drain_tmr_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - transport_cmd_finish_abort(cmd, 1); - target_put_sess_cmd(cmd); + if (!transport_cmd_finish_abort(cmd, 1)) + target_put_sess_cmd(cmd); } } @@ -380,8 +380,8 @@ static void core_tmr_drain_state_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - core_tmr_handle_tas_abort(cmd, tas); - target_put_sess_cmd(cmd); + if (!core_tmr_handle_tas_abort(cmd, tas)) + target_put_sess_cmd(cmd); } } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6025935036c9..f1b3a46bdcaf 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) percpu_ref_put(&lun->lun_ref); } -void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) +int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); + int ret = 0; if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) transport_lun_remove_cmd(cmd); @@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) cmd->se_tfo->aborted_task(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) - return; + return 1; if (remove && ack_kref) - transport_put_cmd(cmd); + ret = transport_put_cmd(cmd); + + return ret; } static void target_complete_failure_work(struct work_struct *work) From 105fa2f44e504c830697b0c794822112d79808dc Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 3 Jun 2017 05:35:47 -0700 Subject: [PATCH 108/436] iscsi-target: Fix delayed logout processing greater than SECONDS_FOR_LOGOUT_COMP This patch fixes a BUG() in iscsit_close_session() that could be triggered when iscsit_logout_post_handler() execution from within tx thread context was not run for more than SECONDS_FOR_LOGOUT_COMP (15 seconds), and the TCP connection didn't already close before then forcing tx thread context to automatically exit. This would manifest itself during explicit logout as: [33206.974254] 1 connection(s) still exist for iSCSI session to iqn.1993-08.org.debian:01:3f5523242179 [33206.980184] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 2100.772 msecs [33209.078643] ------------[ cut here ]------------ [33209.078646] kernel BUG at drivers/target/iscsi/iscsi_target.c:4346! Normally when explicit logout attempt fails, the tx thread context exits and iscsit_close_connection() from rx thread context does the extra cleanup once it detects conn->conn_logout_remove has not been cleared by the logout type specific post handlers. To address this special case, if the logout post handler in tx thread context detects conn->tx_thread_active has already been cleared, simply return and exit in order for existing iscsit_close_connection() logic from rx thread context do failed logout cleanup. 
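The fix hinges on the cmpxchg()-based hand-off already used for conn->tx_thread_active: whichever context observes the flag as true flips it to false and owns the remaining teardown, while the other context simply backs off. A minimal, hypothetical sketch of that idiom follows; the names claim_teardown and thread_active are placeholders for illustration, not the iscsi-target code itself.

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch only: the "exactly one context wins" hand-off described above.
 * The caller that sees the flag as true flips it to false and owns the
 * shutdown work; a caller that sees false returns and does nothing.
 */
static bool claim_teardown(bool *thread_active)
{
	/* cmpxchg() returns the previous value of *thread_active */
	return cmpxchg(thread_active, true, false);
}

/*
 * Logout post handler (tx thread context), per the description above:
 *
 *	if (!claim_teardown(&conn->tx_thread_active))
 *		return;	 /* tx thread already exited; rx side cleans up */
 *	...wait for and complete the logout...
 */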
Reported-by: Bart Van Assche Tested-by: Bart Van Assche Cc: Mike Christie Cc: Hannes Reinecke Cc: Sagi Grimberg Cc: stable@vger.kernel.org # 3.14+ Tested-by: Gary Guo Tested-by: Chu Yuan Lin Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 0d8f81591bed..c0254516b380 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4423,8 +4423,11 @@ static void iscsit_logout_post_handler_closesession( * always sleep waiting for RX/TX thread shutdown to complete * within iscsit_close_connection(). */ - if (!conn->conn_transport->rdma_shutdown) + if (!conn->conn_transport->rdma_shutdown) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4440,8 +4443,11 @@ static void iscsit_logout_post_handler_samecid( { int sleep = 1; - if (!conn->conn_transport->rdma_shutdown) + if (!conn->conn_transport->rdma_shutdown) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); From abb85a9b512e8ca7ad04a5a8a6db9664fe644974 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 7 Jun 2017 20:29:50 -0700 Subject: [PATCH 109/436] iscsi-target: Reject immediate data underflow larger than SCSI transfer length When iscsi WRITE underflow occurs there are two different scenarios that can happen. Normally in practice, when an EDTL vs. SCSI CDB TRANSFER LENGTH underflow is detected, the iscsi immediate data payload is the smaller SCSI CDB TRANSFER LENGTH. That is, when a host fabric LLD is using a fixed size EDTL for a specific control CDB, the SCSI CDB TRANSFER LENGTH and actual SCSI payload ends up being smaller than EDTL. In iscsi, this means the received iscsi immediate data payload matches the smaller SCSI CDB TRANSFER LENGTH, because there is no more SCSI payload to accept beyond SCSI CDB TRANSFER LENGTH. However, it's possible for a malicous host to send a WRITE underflow where EDTL is larger than SCSI CDB TRANSFER LENGTH, but incoming iscsi immediate data actually matches EDTL. In the wild, we've never had a iscsi host environment actually try to do this. For this special case, it's wrong to truncate part of the control CDB payload and continue to process the command during underflow when immediate data payload received was larger than SCSI CDB TRANSFER LENGTH, so go ahead and reject and drop the bogus payload as a defensive action. Note this potential bug was originally relaxed by the following for allowing WRITE underflow in MSFT FCP host environments: commit c72c5250224d475614a00c1d7e54a67f77cd3410 Author: Roland Dreier Date: Wed Jul 22 15:08:18 2015 -0700 target: allow underflow/overflow for PR OUT etc. commands Cc: Roland Dreier Cc: Mike Christie Cc: Hannes Reinecke Cc: Martin K. 
Petersen Cc: # v4.3+ Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c0254516b380..3fdca2cdd8da 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, */ if (dump_payload) goto after_immediate_data; + /* + * Check for underflow case where both EDTL and immediate data payload + * exceeds what is presented by CDB's TRANSFER LENGTH, and what has + * already been set in target_cmd_size_check() as se_cmd->data_length. + * + * For this special case, fail the command and dump the immediate data + * payload. + */ + if (cmd->first_burst_len > cmd->se_cmd.data_length) { + cmd->sense_reason = TCM_INVALID_CDB_FIELD; + goto after_immediate_data; + } immed_ret = iscsit_handle_immediate_data(cmd, hdr, cmd->first_burst_len); From ba714a9c1dea85e0bf2899d02dfeb9c70040427c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 23 May 2017 23:23:32 +0200 Subject: [PATCH 110/436] pinctrl/amd: Use regular interrupt instead of chained The AMD pinctrl driver uses a chained interrupt to demultiplex the GPIO interrupts. Kevin Vandeventer reported that his new AMD Ryzen locks up hard on boot when the AMD pinctrl driver is initialized. The reason is an interrupt storm. It's not clear whether that's caused by hardware or firmware or both. Using chained interrupts on X86 is a dangerous endeavour. If a system is misconfigured or the hardware is buggy, there is no safety net to catch an interrupt storm. Convert the driver to use a regular interrupt for the demultiplex handler. This allows the interrupt storm detector to catch the malfunction and lets the system boot up. This should be backported to stable because it's likely that more users run into this problem as the AMD Ryzen machines are spreading.
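The difference lies in what the handler can report back: a regular interrupt handler returns IRQ_NONE when it finds nothing pending, which feeds the spurious-interrupt accounting and lets the core shut down a storming line, while a chained flow handler has no such return path. A rough sketch of the shape of such a demultiplexing handler (the helper functions are hypothetical placeholders, not the driver's code):

#include <linux/interrupt.h>

static bool demux_has_pending(void *ctx);	/* hypothetical: any child IRQ pending? */
static void demux_handle_children(void *ctx);	/* hypothetical: generic_handle_irq() per child */

static irqreturn_t demux_irq_handler(int irq, void *dev_id)
{
	if (!demux_has_pending(dev_id))
		return IRQ_NONE;	/* counted by the spurious-IRQ/storm detector */

	demux_handle_children(dev_id);
	return IRQ_HANDLED;
}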
Reported-by: Kevin Vandeventer Link: https://bugzilla.suse.com/show_bug.cgi?id=1034261 Signed-off-by: Thomas Gleixner Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-amd.c | 91 ++++++++++++++++------------------- 1 file changed, 41 insertions(+), 50 deletions(-) diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 1482d132fbb8..e432ec887479 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = { .flags = IRQCHIP_SKIP_SET_WAKE, }; -static void amd_gpio_irq_handler(struct irq_desc *desc) +#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) + +static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) { - u32 i; - u32 off; - u32 reg; - u32 pin_reg; - u64 reg64; - int handled = 0; - unsigned int irq; + struct amd_gpio *gpio_dev = dev_id; + struct gpio_chip *gc = &gpio_dev->gc; + irqreturn_t ret = IRQ_NONE; + unsigned int i, irqnr; unsigned long flags; - struct irq_chip *chip = irq_desc_get_chip(desc); - struct gpio_chip *gc = irq_desc_get_handler_data(desc); - struct amd_gpio *gpio_dev = gpiochip_get_data(gc); + u32 *regs, regval; + u64 status, mask; - chained_irq_enter(chip, desc); - /*enable GPIO interrupt again*/ + /* Read the wake status */ raw_spin_lock_irqsave(&gpio_dev->lock, flags); - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); - reg64 = reg; - reg64 = reg64 << 32; - - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0); - reg64 |= reg; + status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); + status <<= 32; + status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); - /* - * first 46 bits indicates interrupt status. - * one bit represents four interrupt sources. 
- */ - for (off = 0; off < 46 ; off++) { - if (reg64 & BIT(off)) { - for (i = 0; i < 4; i++) { - pin_reg = readl(gpio_dev->base + - (off * 4 + i) * 4); - if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || - (pin_reg & BIT(WAKE_STS_OFF))) { - irq = irq_find_mapping(gc->irqdomain, - off * 4 + i); - generic_handle_irq(irq); - writel(pin_reg, - gpio_dev->base - + (off * 4 + i) * 4); - handled++; - } - } + /* Bit 0-45 contain the relevant status bits */ + status &= (1ULL << 46) - 1; + regs = gpio_dev->base; + for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) { + if (!(status & mask)) + continue; + status &= ~mask; + + /* Each status bit covers four pins */ + for (i = 0; i < 4; i++) { + regval = readl(regs + i); + if (!(regval & PIN_IRQ_PENDING)) + continue; + irq = irq_find_mapping(gc->irqdomain, irqnr + i); + generic_handle_irq(irq); + /* Clear interrupt */ + writel(regval, regs + i); + ret = IRQ_HANDLED; } } - if (handled == 0) - handle_bad_irq(desc); - + /* Signal EOI to the GPIO unit */ raw_spin_lock_irqsave(&gpio_dev->lock, flags); - reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); - reg |= EOI_MASK; - writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); + regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG); + regval |= EOI_MASK; + writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); - chained_irq_exit(chip, desc); + return ret; } static int amd_get_groups_count(struct pinctrl_dev *pctldev) @@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev) goto out2; } - gpiochip_set_chained_irqchip(&gpio_dev->gc, - &amd_gpio_irqchip, - irq_base, - amd_gpio_irq_handler); + ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0, + KBUILD_MODNAME, gpio_dev); + if (ret) + goto out2; + platform_set_drvdata(pdev, gpio_dev); dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); From b7c747d4627462f25b3daabf49c18895a6722faa Mon Sep 17 00:00:00 2001 From: Alexandre TORGUE Date: Tue, 30 May 2017 16:43:04 +0200 Subject: [PATCH 111/436] pinctrl: stm32: Fix bad function call In stm32_pconf_parse_conf function, stm32_pmx_gpio_set_direction is called with wrong parameter value. Indeed, using NULL value for range will raise an oops. Fixes: aceb16dc2da5 ("pinctrl: Add STM32 MCUs support") Reported-by: Dan Carpenter Signed-off-by: Alexandre TORGUE Signed-off-by: Linus Walleij --- drivers/pinctrl/stm32/pinctrl-stm32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index d3c5f5dfbbd7..222b6685b09f 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_OUTPUT: __stm32_gpio_set(bank, offset, arg); - ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); + ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false); break; default: ret = -EINVAL; From ff3416fb5b02cfae94591ef4395b0b4dc793f25e Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Thu, 18 May 2017 10:22:22 +0200 Subject: [PATCH 112/436] can: dev: make can_change_state() robust to be called with cf == NULL In OOM situations where no skb can be allocated, can_change_state() may be called with cf == NULL. As this function updates the state and error statistics it's not an option to skip the call to can_change_state() in OOM situations. 
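A typical caller looks roughly like the sketch below (hypothetical driver code, not taken from any specific in-tree driver): the error-frame skb allocation may fail and leave cf == NULL, while the state and statistics update still has to run.

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static void report_state_change(struct net_device *dev,
				enum can_state tx_state,
				enum can_state rx_state)
{
	struct can_frame *cf;
	struct sk_buff *skb = alloc_can_err_skb(dev, &cf); /* cf may stay NULL */

	can_change_state(dev, cf, tx_state, rx_state);	/* must tolerate cf == NULL */
	if (skb)
		netif_rx(skb);
}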
This patch makes can_change_state() robust, so that it can be called with cf == NULL. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 611d16a7061d..ae4ed03dc642 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, can_update_state_error_stats(dev, new_state); priv->state = new_state; + if (!cf) + return; + if (unlikely(new_state == CAN_STATE_BUS_OFF)) { cf->can_id |= CAN_ERR_BUSOFF; return; From f2a918b40cea994f9524ace67fd05ef42a8d9e5b Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Wed, 3 May 2017 10:35:04 +0200 Subject: [PATCH 113/436] can: peak_canfd: fix uninitialized symbol warnings This patch fixes two uninitialized symbol warnings in the new code adding support of the PEAK-System PCAN-PCI Express FD boards, in the socket-CAN network protocol family. Signed-off-by: Stephane Grosjean Reported-by: Dan Carpenter Signed-off-by: Marc Kleine-Budde --- drivers/net/can/peak_canfd/peak_canfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 0d57be5ea97b..85268be0c913 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, struct pucan_rx_msg *msg_list, int msg_count) { void *msg_ptr = msg_list; - int i, msg_size; + int i, msg_size = 0; for (i = 0; i < msg_count; i++) { msg_size = peak_canfd_handle_msg(priv, msg_ptr); From dadcd398b3f059a0aa9de1cac8030a1d5a3791a5 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 12 May 2017 12:09:25 +0200 Subject: [PATCH 114/436] can: peak_usb: fix product-id endianness in error message Make sure to use the USB device product-id stored in host-byte order in a probe error message. Also remove a redundant reassignment of the local usb_dev variable which had already been used to retrieve the product id. Signed-off-by: Johan Hovold Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 57913dbbae0a..1ca76e03e965 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf, const struct peak_usb_adapter *peak_usb_adapter = NULL; int i, err = -ENOMEM; - usb_dev = interface_to_usbdev(intf); - /* get corresponding PCAN-USB adapter */ for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) if (peak_usb_adapters_list[i]->device_id == usb_id_product) { @@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf, if (!peak_usb_adapter) { /* should never come except device_id bad usage in this file */ pr_err("%s: didn't find device id. 0x%x in devices list\n", - PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); + PCAN_USB_DRIVER_NAME, usb_id_product); return -ENODEV; } From 5cda3ee5138e91ac369ed9d0b55eab0dab077686 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Sun, 4 Jun 2017 14:03:42 +0200 Subject: [PATCH 115/436] can: gs_usb: fix memory leak in gs_cmd_reset() This patch adds the missing kfree() in gs_cmd_reset() to free the memory that is not used anymore after usb_control_msg(). 
Cc: linux-stable Cc: Maximilian Schneider Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/gs_usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index eecee7f8dfb7..afcc1312dbaf 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) sizeof(*dm), 1000); + kfree(dm); + return rc; } From 74b7b490886852582d986a33443c2ffa50970169 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 6 Jun 2017 13:53:16 +0200 Subject: [PATCH 116/436] can: af_can: namespace support: fix lockdep splat: properly initialize spin_lock This patch uses spin_lock_init() instead of __SPIN_LOCK_UNLOCKED() to initialize the per namespace net->can.can_rcvlists_lock lock to fix this lockdep warning: | INFO: trying to register non-static key. | the code is fine but needs lockdep annotation. | turning off the locking correctness validator. | CPU: 0 PID: 186 Comm: candump Not tainted 4.12.0-rc3+ #47 | Hardware name: Marvell Kirkwood (Flattened Device Tree) | [] (unwind_backtrace) from [] (show_stack+0x18/0x1c) | [] (show_stack) from [] (register_lock_class+0x1e4/0x55c) | [] (register_lock_class) from [] (__lock_acquire+0x148/0x1990) | [] (__lock_acquire) from [] (lock_acquire+0x174/0x210) | [] (lock_acquire) from [] (_raw_spin_lock+0x50/0x88) | [] (_raw_spin_lock) from [] (can_rx_register+0x94/0x15c [can]) | [] (can_rx_register [can]) from [] (raw_enable_filters+0x60/0xc0 [can_raw]) | [] (raw_enable_filters [can_raw]) from [] (raw_enable_allfilters+0x2c/0xa0 [can_raw]) | [] (raw_enable_allfilters [can_raw]) from [] (raw_bind+0xb0/0x250 [can_raw]) | [] (raw_bind [can_raw]) from [] (SyS_bind+0x70/0xac) | [] (SyS_bind) from [] (ret_fast_syscall+0x0/0x1c) Cc: Mario Kicherer Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- net/can/af_can.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/can/af_can.c b/net/can/af_can.c index b6406fe33c76..88edac0f3e36 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, static int can_pernet_init(struct net *net) { - net->can.can_rcvlists_lock = - __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock); + spin_lock_init(&net->can.can_rcvlists_lock); net->can.can_rx_alldev_list = kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); From 97edec3a11cf6f73f2e45c3035b5ff8e4c3543dd Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Fri, 2 Jun 2017 19:37:30 +0200 Subject: [PATCH 117/436] can: enable CAN FD for virtual CAN devices by default CAN FD capable CAN interfaces can handle (classic) CAN 2.0 frames too. New users usually fail at their first attempt to explore CAN FD on virtual CAN interfaces due to the current CAN_MTU default. Set the MTU to CANFD_MTU by default to reduce this confusion. If someone *really* needs a 'classic CAN'-only device this can be set with the 'ip' tool with e.g. 'ip link set vcan0 mtu 16' as before. 
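For applications, reading the interface MTU is the usual way to tell the two cases apart at run time; a small user-space sketch follows (the interface name "vcan0" is assumed purely for illustration):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>

int main(void)
{
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "vcan0", IFNAMSIZ - 1);
	if (ioctl(s, SIOCGIFMTU, &ifr) < 0) {
		perror("SIOCGIFMTU");
		close(s);
		return 1;
	}
	printf("%s: MTU %d (%s)\n", ifr.ifr_name, ifr.ifr_mtu,
	       ifr.ifr_mtu == CANFD_MTU ? "CAN FD capable" : "classic CAN only");
	close(s);
	return 0;
}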
Signed-off-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/vcan.c | 2 +- drivers/net/can/vxcan.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 0eda1b308583..a8cb33264ff1 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = { static void vcan_setup(struct net_device *dev) { dev->type = ARPHRD_CAN; - dev->mtu = CAN_MTU; + dev->mtu = CANFD_MTU; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 30cf2368becf..cfe889e8f172 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -150,7 +150,7 @@ static const struct net_device_ops vxcan_netdev_ops = { static void vxcan_setup(struct net_device *dev) { dev->type = ARPHRD_CAN; - dev->mtu = CAN_MTU; + dev->mtu = CANFD_MTU; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; From f50b878fed33e360d01dcdc31a8eeb1815d033d5 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 8 Jun 2017 13:55:59 -0400 Subject: [PATCH 118/436] USB: gadget: fix GPF in gadgetfs A NULL-pointer dereference bug in gadgetfs was uncovered by syzkaller: > kasan: GPF could be caused by NULL-ptr deref or user memory access > general protection fault: 0000 [#1] SMP KASAN > Dumping ftrace buffer: > (ftrace buffer empty) > Modules linked in: > CPU: 2 PID: 4820 Comm: syz-executor0 Not tainted 4.12.0-rc4+ #5 > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 > task: ffff880039542dc0 task.stack: ffff88003bdd0000 > RIP: 0010:__list_del_entry_valid+0x7e/0x170 lib/list_debug.c:51 > RSP: 0018:ffff88003bdd6e50 EFLAGS: 00010246 > RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000010000 > RDX: 0000000000000000 RSI: ffffffff86504948 RDI: ffffffff86504950 > RBP: ffff88003bdd6e68 R08: ffff880039542dc0 R09: ffffffff8778ce00 > R10: ffff88003bdd6e68 R11: dffffc0000000000 R12: 0000000000000000 > R13: dffffc0000000000 R14: 1ffff100077badd2 R15: ffffffff864d2e40 > FS: 0000000000000000(0000) GS:ffff88006dc00000(0000) knlGS:0000000000000000 > CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 > CR2: 000000002014aff9 CR3: 0000000006022000 CR4: 00000000000006e0 > Call Trace: > __list_del_entry include/linux/list.h:116 [inline] > list_del include/linux/list.h:124 [inline] > usb_gadget_unregister_driver+0x166/0x4c0 drivers/usb/gadget/udc/core.c:1387 > dev_release+0x80/0x160 drivers/usb/gadget/legacy/inode.c:1187 > __fput+0x332/0x7f0 fs/file_table.c:209 > ____fput+0x15/0x20 fs/file_table.c:245 > task_work_run+0x19b/0x270 kernel/task_work.c:116 > exit_task_work include/linux/task_work.h:21 [inline] > do_exit+0x18a3/0x2820 kernel/exit.c:878 > do_group_exit+0x149/0x420 kernel/exit.c:982 > get_signal+0x77f/0x1780 kernel/signal.c:2318 > do_signal+0xd2/0x2130 arch/x86/kernel/signal.c:808 > exit_to_usermode_loop+0x1a7/0x240 arch/x86/entry/common.c:157 > prepare_exit_to_usermode arch/x86/entry/common.c:194 [inline] > syscall_return_slowpath+0x3ba/0x410 arch/x86/entry/common.c:263 > entry_SYSCALL_64_fastpath+0xbc/0xbe > RIP: 0033:0x4461f9 > RSP: 002b:00007fdac2b1ecf8 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca > RAX: fffffffffffffe00 RBX: 00000000007080c8 RCX: 00000000004461f9 > RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00000000007080c8 > RBP: 00000000007080a8 R08: 0000000000000000 R09: 0000000000000000 > R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 > R13: 
0000000000000000 R14: 00007fdac2b1f9c0 R15: 00007fdac2b1f700 > Code: 00 00 00 00 ad de 49 39 c4 74 6a 48 b8 00 02 00 00 00 00 ad de > 48 89 da 48 39 c3 74 74 48 c1 ea 03 48 b8 00 00 00 00 00 fc ff df <80> > 3c 02 00 0f 85 92 00 00 00 48 8b 13 48 39 f2 75 66 49 8d 7c > RIP: __list_del_entry_valid+0x7e/0x170 lib/list_debug.c:51 RSP: ffff88003bdd6e50 > ---[ end trace 30e94b1eec4831c8 ]--- > Kernel panic - not syncing: Fatal exception The bug was caused by dev_release() failing to turn off its gadget_registered flag after unregistering the gadget driver. As a result, when a later user closed the device file before writing a valid set of descriptors, dev_release() thought the gadget had been registered and tried to unregister it, even though it had not been. This led to the NULL pointer dereference. The fix is simple: turn off the flag when the gadget is unregistered. Signed-off-by: Alan Stern Reported-and-tested-by: Andrey Konovalov CC: Signed-off-by: Felipe Balbi --- drivers/usb/gadget/legacy/inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index b9ca0a26cbd9..5ffd879f7886 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd) /* closing ep0 === shutdown all */ - if (dev->gadget_registered) + if (dev->gadget_registered) { usb_gadget_unregister_driver (&gadgetfs_driver); + dev->gadget_registered = false; + } /* at this point "good" hardware has disconnected the * device from USB; the host won't see it any more. From fc5b775da468e359154795afeb27be79b509b76d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 9 Jun 2017 15:45:32 +0200 Subject: [PATCH 119/436] net: phy: add missing SPEED_14000 Fixes: 0d7e2d2166f6 ("IB/ipoib: add get_link_ksettings in ethtool") Signed-off-by: Joe Perches Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7524caa0f29d..eebb0e1c70ff 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed) return "5Gbps"; case SPEED_10000: return "10Gbps"; + case SPEED_14000: + return "14Gbps"; case SPEED_20000: return "20Gbps"; case SPEED_25000: From defbcf2decc903a28d8398aa477b6881e711e3ea Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Thu, 8 Jun 2017 11:13:36 +0200 Subject: [PATCH 120/436] af_unix: Add sockaddr length checks before accessing sa_family in bind and connect handlers Verify that the caller-provided sockaddr structure is large enough to contain the sa_family field, before accessing it in bind() and connect() handlers of the AF_UNIX socket. Since neither syscall enforces a minimum size of the corresponding memory region, very short sockaddrs (zero or one byte long) result in operating on uninitialized memory while referencing .sa_family. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. 
Miller --- net/unix/af_unix.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 6a7fe7660551..1a0c961f4ffe 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct path path = { }; err = -EINVAL; - if (sunaddr->sun_family != AF_UNIX) + if (addr_len < offsetofend(struct sockaddr_un, sun_family) || + sunaddr->sun_family != AF_UNIX) goto out; if (addr_len == sizeof(short)) { @@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, unsigned int hash; int err; + err = -EINVAL; + if (alen < offsetofend(struct sockaddr, sa_family)) + goto out; + if (addr->sa_family != AF_UNSPEC) { err = unix_mkname(sunaddr, alen, &hash); if (err < 0) From f186ce61bb8235d80068c390dc2aad7ca427a4c2 Mon Sep 17 00:00:00 2001 From: Krister Johansen Date: Thu, 8 Jun 2017 13:12:38 -0700 Subject: [PATCH 121/436] Fix an intermittent pr_emerg warning about lo becoming free. It looks like this: Message from syslogd@flamingo at Apr 26 00:45:00 ... kernel:unregister_netdevice: waiting for lo to become free. Usage count = 4 They seem to coincide with net namespace teardown. The message is emitted by netdev_wait_allrefs(). Forced a kdump in netdev_run_todo, but found that the refcount on the lo device was already 0 at the time we got to the panic. Used bcc to check the blocking in netdev_run_todo. The only places where we're off cpu there are in the rcu_barrier() and msleep() calls. That behavior is expected. The msleep time coincides with the amount of time we spend waiting for the refcount to reach zero; the rcu_barrier() wait times are not excessive. After looking through the list of callbacks that the netdevice notifiers invoke in this path, it appears that the dst_dev_event is the most interesting. The dst_ifdown path places a hold on the loopback_dev as part of releasing the dev associated with the original dst cache entry. Most of our notifier callbacks are straight-forward, but this one a) looks complex, and b) places a hold on the network interface in question. I constructed a new bcc script that watches various events in the liftime of a dst cache entry. Note that dst_ifdown will take a hold on the loopback device until the invalidated dst entry gets freed. [ __dst_free] on DST: ffff883ccabb7900 IF tap1008300eth0 invoked at 1282115677036183 __dst_free rcu_nocb_kthread kthread ret_from_fork Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dst.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/core/dst.c b/net/core/dst.c index 6192f11beec9..13ba4a090c41 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -469,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, spin_lock_bh(&dst_garbage.lock); dst = dst_garbage.list; dst_garbage.list = NULL; + /* The code in dst_ifdown places a hold on the loopback device. + * If the gc entry processing is set to expire after a lengthy + * interval, this hold can cause netdev_wait_allrefs() to hang + * out and wait for a long time -- until the the loopback + * interface is released. If we're really unlucky, it'll emit + * pr_emerg messages to console too. Reset the interval here, + * so dst cleanups occur in a more timely fashion. 
+ */ + if (dst_garbage.timer_inc > DST_GC_INC) { + dst_garbage.timer_inc = DST_GC_INC; + dst_garbage.timer_expires = DST_GC_MIN; + mod_delayed_work(system_wq, &dst_gc_work, + dst_garbage.timer_expires); + } spin_unlock_bh(&dst_garbage.lock); if (last) From ff85a1a80e00349dc7783c8dc4d6233d9a709283 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 21 May 2017 11:44:47 +0200 Subject: [PATCH 122/436] kconfig: Check for libncurses before menuconfig There is a check and a nice user-friendly message when the curses library is not present on the system and the user wants to do "make menuconfig". It doesn't get issued, though. Instead, we fail the build when mconf.c doesn't find the curses.h header: HOSTCC scripts/kconfig/mconf.o In file included from scripts/kconfig/mconf.c:23:0: scripts/kconfig/lxdialog/dialog.h:38:20: fatal error: curses.h: No such file or directory #include CURSES_LOC ^ compilation terminated. Make that check a prerequisite to mconf so that the user sees the error message instead: $ make menuconfig *** Unable to find the ncurses libraries or the *** required header files. *** 'make menuconfig' requires the ncurses libraries. *** *** Install ncurses (ncurses-devel) and try again. *** scripts/kconfig/Makefile:203: recipe for target 'scripts/kconfig/dochecklxdialog' failed make[1]: *** [scripts/kconfig/dochecklxdialog] Error 1 Makefile:548: recipe for target 'menuconfig' failed make: *** [menuconfig] Error 2 Signed-off-by: Borislav Petkov Signed-off-by: Masahiro Yamada --- scripts/kconfig/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 90a091b6ae4d..eb8144643b78 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -196,7 +196,7 @@ clean-files += config.pot linux.pot # Check that we have the required ncurses stuff installed for lxdialog (menuconfig) PHONY += $(obj)/dochecklxdialog -$(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog +$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog $(obj)/dochecklxdialog: $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf) From fd6720aefde06eacf17404eed2cad65c6ec103e1 Mon Sep 17 00:00:00 2001 From: Mario Molitor Date: Thu, 8 Jun 2017 22:41:02 +0200 Subject: [PATCH 123/436] stmmac: fix ptp header for GMAC3 hw timestamp According the CYCLON V documention only the bit 16 of snaptypesel should set. (more information see Table 17-20 (cv_5v4.pdf) : Timestamp Snapshot Dependency on Register Bits) Fixes: d2042052a0aa ("stmmac: update the PTP header file") Signed-off-by: Mario Molitor Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 15 ++++++++++++--- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 3 ++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 12236daf7bb6..d54e5d74b3fd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 48fb72fc423c..f4b31d69f60e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -59,7 +59,8 @@ /* Enable Snapshot for Messages Relevant to Master */ #define PTP_TCR_TSMSTRENA BIT(15) /* Select PTP packets for Taking Snapshots */ -#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) +#define PTP_TCR_SNAPTYPSEL_1 BIT(16) +#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) /* Enable MAC address for PTP Frame Filtering */ #define PTP_TCR_TSENMACADDR BIT(18) From 33d4c4821323729b463e45d0b2d32f6a87dcba5b Mon Sep 17 00:00:00 2001 From: Mario Molitor Date: Thu, 8 Jun 2017 23:03:09 +0200 Subject: [PATCH 124/436] stmmac: fix for hw timestamp of GMAC3 unit 1.) Bugfix of function stmmac_get_tx_hwtstamp. Corrected the tx timestamp available check (same as 4.8 and older) Change printout from info syslevel to debug. 2.) Bugfix of function stmmac_get_rx_hwtstamp. Corrected the rx timestamp available check (same as 4.8 and older) Change printout from info syslevel to debug. Fixes: ba1ffd74df74 ("stmmac: fix PTP support for GMAC4") Signed-off-by: Mario Molitor Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 11 +++++++---- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10 +++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index aa6476439aee..e0ef02f9503b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) { /* Context type from W/B descriptor must be zero */ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) - return -EINVAL; + return 0; /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) - return 0; + return 1; - return 1; + return 0; } static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) } } exit: - return ret; + if (likely(ret == 0)) + return 1; + + return 0; } static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d54e5d74b3fd..d16d11bfc046 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, return; /* check tx tstamp status */ - if (!priv->hw->desc->get_tx_timestamp_status(p)) { + if (priv->hw->desc->get_tx_timestamp_status(p)) { /* get the valid tstamp */ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); /* pass tstamp to stack */ skb_tstamp_tx(skb, &shhwtstamp); } @@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, return; /* Check if timestamp is available */ - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { /* For GMAC4, the valid timestamp is from CTX next desc. */ if (priv->plat->has_gmac4) ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); else ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp->hwtstamp = ns_to_ktime(ns); } else { - netdev_err(priv->dev, "cannot get RX hw timestamp\n"); + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); } } From 773fc8f6e8d63ec9d840588e161cbb73a01cfc45 Mon Sep 17 00:00:00 2001 From: "ashwanth@codeaurora.org" Date: Fri, 9 Jun 2017 14:24:58 +0530 Subject: [PATCH 125/436] net: rps: send out pending IPI's on CPU hotplug IPI's from the victim cpu are not handled in dev_cpu_callback. So these pending IPI's would be sent to the remote cpu only when NET_RX is scheduled on the victim cpu and since this trigger is unpredictable it would result in packet latencies on the remote cpu. This patch add support to send the pending ipi's of victim cpu. Signed-off-by: Ashwanth Goli Signed-off-by: David S. 
Miller --- net/core/dev.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 4c15466305c3..54bb8d99d26a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4949,6 +4949,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) } EXPORT_SYMBOL(__skb_gro_checksum_complete); +static void net_rps_send_ipi(struct softnet_data *remsd) +{ +#ifdef CONFIG_RPS + while (remsd) { + struct softnet_data *next = remsd->rps_ipi_next; + + if (cpu_online(remsd->cpu)) + smp_call_function_single_async(remsd->cpu, &remsd->csd); + remsd = next; + } +#endif +} + /* * net_rps_action_and_irq_enable sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. @@ -4964,14 +4977,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) local_irq_enable(); /* Send pending IPI's to kick RPS processing on remote cpus. */ - while (remsd) { - struct softnet_data *next = remsd->rps_ipi_next; - - if (cpu_online(remsd->cpu)) - smp_call_function_single_async(remsd->cpu, - &remsd->csd); - remsd = next; - } + net_rps_send_ipi(remsd); } else #endif local_irq_enable(); @@ -8197,7 +8203,7 @@ static int dev_cpu_dead(unsigned int oldcpu) struct sk_buff **list_skb; struct sk_buff *skb; unsigned int cpu; - struct softnet_data *sd, *oldsd; + struct softnet_data *sd, *oldsd, *remsd; local_irq_disable(); cpu = smp_processor_id(); @@ -8238,6 +8244,13 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); +#ifdef CONFIG_RPS + remsd = oldsd->rps_ipi_list; + oldsd->rps_ipi_list = NULL; +#endif + /* send out pending IPI's on offline CPU */ + net_rps_send_ipi(remsd); + /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx_ni(skb); From c7a61cba71fd151cc7d9ebe53a090e0e61eeebf3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 9 Jun 2017 21:33:09 +0200 Subject: [PATCH 126/436] mac80211: free netdev on dev_alloc_name() error The change to remove free_netdev() from ieee80211_if_free() erroneously didn't add the necessary free_netdev() for when ieee80211_if_free() is called directly in one place, rather than as the priv_destructor. Add the missing call. Fixes: cf124db566e6 ("net: Fix inconsistent teardown and release of private netdev state.") Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- net/mac80211/iface.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 915d7e1b4545..f5f50150ba1c 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1816,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) { ieee80211_if_free(ndev); + free_netdev(ndev); return ret; } From 92f85f05caa51d844af6ea14ffbc7a786446a644 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 9 Jun 2017 17:17:01 +0300 Subject: [PATCH 127/436] bnx2x: Allow vfs to disable txvlan offload VF clients are configured as enforced, meaning firmware is validating the correctness of their ethertype/vid during transmission. Once txvlan is disabled, VF would start getting SKBs for transmission here vlan is on the payload - but it'll pass the packet's ethertype instead of the vid, leading to firmware declaring it as malicious. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5f49334dcad5..f619c4cac51f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ + u16 vlan_tci = 0; #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) + if (IS_VF(bp)) { #endif - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(ntohs(eth->h_proto)); + /* Still need to consider inband vlan for enforced */ + if (__vlan_get_tag(skb, &vlan_tci)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + } else { + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tci); + } #ifndef BNX2X_STOP_ON_ERROR - else + } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } #endif } From 3523882229b903e967de05665b871dab87c5df0f Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 9 Jun 2017 17:17:02 +0300 Subject: [PATCH 128/436] bnx2x: Don't post statistics to malicious VFs Once firmware indicates that a given VF is malicious and until that VF passes an FLR all bets are off - PF can't know anything is happening to the VF [since VF can't communicate anything to its PF]. But PF is currently still periodically asking device to collect statistics for the VF which might in turn fill logs by IOMMU blocking memory access done by the VF's PCI function [in the case VF has unmapped its buffers]. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 13 ++++++++++++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 870ea001a720..9ca994d0bab6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) /* release VF resources */ bnx2x_vf_free_resc(bp, vf); + vf->malicious = false; + /* re-open the mailbox */ bnx2x_vf_enable_mbx(bp, vf->abs_vfid); return; @@ -1822,9 +1824,11 @@ get_vf: vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); case EVENT_RING_OPCODE_VF_FLR: - case EVENT_RING_OPCODE_MALICIOUS_VF: /* Do nothing for now */ return 0; + case EVENT_RING_OPCODE_MALICIOUS_VF: + vf->malicious = true; + return 0; } return 0; @@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) continue; } + if (vf->malicious) { + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "vf %d malicious so no stats for it\n", + vf->abs_vfid); + continue; + } + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), "add addresses for vf %d\n", vf->abs_vfid); for_each_vfq(vf, j) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 888d0b6632e8..53466f6cebab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -141,6 +141,7 @@ struct bnx2x_virtf { #define VF_RESET 3 /* VF FLR'd, pending cleanup */ bool flr_clnup_stage; /* true during flr cleanup */ + bool malicious; /* true if FW indicated so, until FLR */ /* dma */ dma_addr_t fw_stat_map; From d8dba51de5dfb55225b169653bbd7379ac0d0c63 Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Fri, 9 Jun 2017 23:50:57 +0200 Subject: [PATCH 129/436] net: aquantia: atlantic: remove declaration of hw_atl_utils_hw_set_power This function is not defined, so there is no need to declare it. As I don't have the hardware, I'd be very pleased if someone could test this patch. Signed-off-by: Philippe Reynes Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b8e3d88f0879..a66aee51ab5b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); -int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, - struct ethtool_cmd *cmd); - int hw_atl_utils_hw_set_power(struct aq_hw_s *self, unsigned int power_state); From 9b3dc0a17d7388c4fb83736ca45253a93e994ce4 Mon Sep 17 00:00:00 2001 From: Dominik Heidler Date: Fri, 9 Jun 2017 16:29:47 +0200 Subject: [PATCH 130/436] l2tp: cast l2tp traffic counter to unsigned This fixes a counter problem on 32bit systems: When the rx_bytes counter reached 2 GiB, it jumped to (2^64 Bytes - 2GiB) Bytes. rtnl_link_stats64 has __u64 type and atomic_long_read returns atomic_long_t, which is signed. Due to the conversion we get an incorrect value on 32bit systems if the MSB of the atomic_long_t value is set.
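The sign-extension effect is easy to demonstrate in user space; the sketch below stands in for the 32-bit kernel case by using an int32_t where the driver reads an atomic_long_t (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Stand-in for atomic_long_read() on a 32-bit kernel, where
	 * atomic_long_t is a signed 32-bit quantity. */
	int32_t rx_bytes = INT32_MIN;		/* counter just crossed 2 GiB */
	uint64_t stat;

	stat = rx_bytes;		/* sign-extended: roughly 2^64 - 2 GiB */
	printf("without cast: %llu\n", (unsigned long long)stat);

	stat = (uint32_t)rx_bytes;	/* convert to unsigned first, as the fix does */
	printf("with cast:    %llu\n", (unsigned long long)stat);

	return 0;
}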
CC: Tom Parkin Fixes: 7b7c0719cd7a ("l2tp: avoid deadlock in l2tp stats update") Signed-off-by: Dominik Heidler Signed-off-by: David S. Miller --- net/l2tp/l2tp_eth.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index f7c54ece3733..4de2ec94b08c 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev, { struct l2tp_eth *priv = netdev_priv(dev); - stats->tx_bytes = atomic_long_read(&priv->tx_bytes); - stats->tx_packets = atomic_long_read(&priv->tx_packets); - stats->tx_dropped = atomic_long_read(&priv->tx_dropped); - stats->rx_bytes = atomic_long_read(&priv->rx_bytes); - stats->rx_packets = atomic_long_read(&priv->rx_packets); - stats->rx_errors = atomic_long_read(&priv->rx_errors); + stats->tx_bytes = (unsigned long) atomic_long_read(&priv->tx_bytes); + stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets); + stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped); + stats->rx_bytes = (unsigned long) atomic_long_read(&priv->rx_bytes); + stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets); + stats->rx_errors = (unsigned long) atomic_long_read(&priv->rx_errors); + } static const struct net_device_ops l2tp_eth_netdev_ops = { From bf292f1b2c813f1d6ac49b04bd1a9863d8314266 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 9 Jun 2017 22:37:22 -0300 Subject: [PATCH 131/436] net: fec: Add a fec_enet_clear_ethtool_stats() stub for CONFIG_M5272 Commit 2b30842b23b9 ("net: fec: Clear and enable MIB counters on imx51") introduced fec_enet_clear_ethtool_stats(), but missed to add a stub for the CONFIG_M5272=y case, causing build failure for the m5272c3_defconfig. Add the missing empty stub to fix the build failure. Reported-by: Paul Gortmaker Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f7c8649fd28f..2205e5b32a10 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2361,6 +2361,10 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) static inline void fec_enet_update_ethtool_stats(struct net_device *dev) { } + +static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) +{ +} #endif /* !defined(CONFIG_M5272) */ /* ITR clock source is enet system clock (clk_ahb). From 581409dacc9176b0de1f6c4ca8d66e13aa8e1b29 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 10 Jun 2017 14:48:14 +0800 Subject: [PATCH 132/436] sctp: disable BH in sctp_for_each_endpoint Now sctp holds read_lock when foreach sctp_ep_hashtable without disabling BH. If CPU schedules to another thread A at this moment, the thread A may be trying to hold the write_lock with disabling BH. As BH is disabled and CPU cannot schedule back to the thread holding the read_lock, while the thread A keeps waiting for the read_lock. A dead lock would be triggered by this. This patch is to fix this dead lock by calling read_lock_bh instead to disable BH when holding the read_lock in sctp_for_each_endpoint. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Reported-by: Xiumei Mu Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f16c8d97b7f3..30aa0a529215 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { - read_lock(&head->lock); + read_lock_bh(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } - read_unlock(&head->lock); + read_unlock_bh(&head->lock); } return err; From 5aa32f53ab93175442dffbd95a231d0798cab4d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 10 Jun 2017 16:44:28 -0400 Subject: [PATCH 133/436] Revert "net: fec: Add a fec_enet_clear_ethtool_stats() stub for CONFIG_M5272" This reverts commit bf292f1b2c813f1d6ac49b04bd1a9863d8314266. It belongs in 'net-next' not 'net'. Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2205e5b32a10..f7c8649fd28f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2361,10 +2361,6 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) static inline void fec_enet_update_ethtool_stats(struct net_device *dev) { } - -static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) -{ -} #endif /* !defined(CONFIG_M5272) */ /* ITR clock source is enet system clock (clk_ahb). From f146e872eb12ebbe92d8e583b2637e0741440db3 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 10 Jun 2017 16:49:39 +0800 Subject: [PATCH 134/436] net: caif: Fix a sleep-in-atomic bug in cfpkt_create_pfx The kernel may sleep under a rcu read lock in cfpkt_create_pfx, and the function call path is: cfcnfg_linkup_rsp (acquire the lock by rcu_read_lock) cfctrl_linkdown_req cfpkt_create cfpkt_create_pfx alloc_skb(GFP_KERNEL) --> may sleep cfserl_receive (acquire the lock by rcu_read_lock) cfpkt_split cfpkt_create_pfx alloc_skb(GFP_KERNEL) --> may sleep There is "in_interrupt" in cfpkt_create_pfx to decide use "GFP_KERNEL" or "GFP_ATOMIC". In this situation, "GFP_KERNEL" is used because the function is called under a rcu read lock, instead in interrupt. To fix it, only "GFP_ATOMIC" is used in cfpkt_create_pfx. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- net/caif/cfpkt_skbuff.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 59ce1fcc220c..71b6ab240dea 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; - if (likely(in_interrupt())) - skb = alloc_skb(len + pfx, GFP_ATOMIC); - else - skb = alloc_skb(len + pfx, GFP_KERNEL); - + skb = alloc_skb(len + pfx, GFP_ATOMIC); if (unlikely(skb == NULL)) return NULL; From 343eba69c6968190d8654b857aea952fed9a6749 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 10 Jun 2017 17:03:35 +0800 Subject: [PATCH 135/436] net: tipc: Fix a sleep-in-atomic bug in tipc_msg_reverse The kernel may sleep under a rcu read lock in tipc_msg_reverse, and the function call path is: tipc_l2_rcv_msg (acquire the lock by rcu_read_lock) tipc_rcv tipc_sk_rcv tipc_msg_reverse pskb_expand_head(GFP_KERNEL) --> may sleep tipc_node_broadcast tipc_node_xmit_skb tipc_node_xmit tipc_sk_rcv tipc_msg_reverse pskb_expand_head(GFP_KERNEL) --> may sleep To fix it, "GFP_KERNEL" is replaced with "GFP_ATOMIC". Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- net/tipc/msg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 312ef7de57d7..ab3087687a32 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) } if (skb_cloned(_skb) && - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) + pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) goto exit; /* Now reverse the concerned fields */ From 56b8aae959499508090b2d2db6961ed905f38164 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Sat, 10 Jun 2017 23:18:21 +0200 Subject: [PATCH 136/436] net: mvpp2: remove mvpp2_bm_cookie_{build,pool_get} This commit removes the useless remove mvpp2_bm_cookie_{build,pool_get} functions. All what mvpp2_bm_cookie_build() was doing is compute a 32-bit value by concatenating the pool number and the CPU number... only to get the pool number re-extracted by mvpp2_bm_cookie_pool_get() later on. Instead, just get the pool number directly from RX descriptor status, and pass it to mvpp2_pool_refill() and mvpp2_rx_refill(). This has the added benefit of dropping a smp_processor_id() call in a migration-enabled context, which is wrong, and is the original motivation for making this change. Fixes: 3f518509dedc9 ("ethernet: Add new driver for Marvell Armada 375 network unit") Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 47 +++++++++------------------- 1 file changed, 14 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 70bca2a6fb02..5841e53614c6 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3920,12 +3920,6 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) return bm; } -/* Get pool number from a BM cookie */ -static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) -{ - return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; -} - /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, @@ -3961,12 +3955,10 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, } /* Refill BM pool */ -static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, +static void mvpp2_pool_refill(struct mvpp2_port *port, int pool, dma_addr_t dma_addr, phys_addr_t phys_addr) { - int pool = mvpp2_bm_cookie_pool_get(bm); - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } @@ -4513,21 +4505,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } -/* Obtain BM cookie information from descriptor */ -static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - int cpu = smp_processor_id(); - int pool; - - pool = (mvpp2_rxdesc_status_get(port, rx_desc) & - MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - - return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | - ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); -} - /* Tx descriptors helper methods */ /* Get pointer to next Tx descriptor to be processed (send) by HW */ @@ -4978,9 +4955,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, for (i = 0; i < rx_received; i++) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 bm = mvpp2_bm_cookie_build(port, rx_desc); + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + int pool; - mvpp2_pool_refill(port, bm, + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + + mvpp2_pool_refill(port, pool, mvpp2_rxdesc_dma_addr_get(port, rx_desc), mvpp2_rxdesc_cookie_get(port, rx_desc)); } @@ -5418,7 +5399,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, u32 bm) + struct mvpp2_bm_pool *bm_pool, int pool) { dma_addr_t dma_addr; phys_addr_t phys_addr; @@ -5430,7 +5411,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port, if (!buf) return -ENOMEM; - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); return 0; } @@ -5488,7 +5469,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, unsigned int frag_size; dma_addr_t dma_addr; phys_addr_t phys_addr; - u32 bm, rx_status; + u32 rx_status; int pool, rx_bytes, err; void *data; @@ -5500,8 +5481,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); data = (void *)phys_to_virt(phys_addr); - bm = mvpp2_bm_cookie_build(port, rx_desc); - pool = mvpp2_bm_cookie_pool_get(bm); + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; bm_pool = &port->priv->bm_pools[pool]; /* In case of an error, release the requested buffer pointer @@ -5514,7 +5495,7 
@@ err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); continue; } @@ -5529,7 +5510,7 @@ err_drop_frame: goto err_drop_frame; } - err = mvpp2_rx_refill(port, bm_pool, bm); + err = mvpp2_rx_refill(port, bm_pool, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); goto err_drop_frame; From a704bb5c052bbd6b77c6530a2e83cef42486e335 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Sat, 10 Jun 2017 23:18:22 +0200 Subject: [PATCH 137/436] net: mvpp2: use {get, put}_cpu() instead of smp_processor_id() smp_processor_id() should not be used in migration-enabled contexts. We originally thought it was OK in the specific situation of this driver, but it was wrong, and calling smp_processor_id() in a migration-enabled context prints a big fat warning when CONFIG_DEBUG_PREEMPT=y. Therefore, this commit replaces the smp_processor_id() in migration-enabled contexts by the appropriate get_cpu/put_cpu sections. Reported-by: Marc Zyngier Fixes: a786841df72e ("net: mvpp2: handle register mapping and access for PPv2.2") Signed-off-by: Thomas Petazzoni Tested-by: Marc Zyngier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 5841e53614c6..33c901622ed5 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); *dma_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); @@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, if (sizeof(phys_addr_t) == 8) *phys_addr |= (u64)phys_addr_highbits << 32; } + + put_cpu(); } /* Free all buffers from the pool */ @@ -3925,7 +3927,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); if (port->priv->hw_version == MVPP22) { u32 val = 0; @@ -3952,6 +3954,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); mvpp2_percpu_write(port->priv, cpu, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + + put_cpu(); } /* Refill BM pool */ @@ -4732,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; @@ -4740,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal); + + put_cpu(); } static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) @@ -4920,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); /* Set Rx descriptors queue starting address - indirect access */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, 
MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; @@ -4929,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + put_cpu(); /* Set Offset */ mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); @@ -4991,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + put_cpu(); } /* Create and initialize a Tx queue */ @@ -5017,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq->last_desc = txq->size - 1; /* Set Tx descriptors queue starting address - indirect access */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); @@ -5042,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); + put_cpu(); /* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); @@ -5112,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + put_cpu(); } /* Cleanup Tx ports */ @@ -5125,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) int delay, pending, cpu; u32 val; - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; @@ -5152,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) val &= ~MVPP2_TXQ_DRAIN_EN_MASK; mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + put_cpu(); for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); From f67abed585efe251edda52dc9690020d6441890f Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Fri, 9 Jun 2017 10:00:29 +0200 Subject: [PATCH 138/436] sched/fair: Fix typo in printk message 'schedstats' kernel parameter should be set to enable/disable, so correct the printk hint saying that it should be set to 'enable' rather than 'enabled' to enable scheduler tracepoints. 
Signed-off-by: Marcin Nowakowski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1496995229-31245-1-git-send-email-marcin.nowakowski@imgtec.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d71109321841..c77e4b1d51c0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void) trace_sched_stat_runtime_enabled()) { printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " "stat_blocked and stat_runtime require the " - "kernel parameter schedstats=enabled or " + "kernel parameter schedstats=enable or " "kernel.sched_schedstats=1\n"); } #endif From 252d2a4117bc181b287eeddf848863788da733ae Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 9 Jun 2017 11:49:15 -0700 Subject: [PATCH 139/436] sched/core: Idle_task_exit() shouldn't use switch_mm_irqs_off() idle_task_exit() can be called with IRQs on x86 on and therefore should use switch_mm(), not switch_mm_irqs_off(). This doesn't seem to cause any problems right now, but it will confuse my upcoming TLB flush changes. Nonetheless, I think it should be backported because it's trivial. There won't be any meaningful performance impact because idle_task_exit() is only used when offlining a CPU. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org Fixes: f98db6013c55 ("sched/core: Add switch_mm_irqs_off() and use it in the scheduler") Link: http://lkml.kernel.org/r/ca3d1a9fa93a0b49f5a8ff729eda3640fb6abdf9.1497034141.git.luto@kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 803c3bc274c4..326d4f88e2b1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5605,7 +5605,7 @@ void idle_task_exit(void) BUG_ON(cpu_online(smp_processor_id())); if (mm != &init_mm) { - switch_mm_irqs_off(mm, &init_mm, current); + switch_mm(mm, &init_mm, current); finish_arch_post_lock_switch(); } mmdrop(mm); From 57f35c93a2072c1f66ab1728a77a9ee4bc33bf42 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Thu, 25 May 2017 16:46:14 +0300 Subject: [PATCH 140/436] net/mlx5: Fix create vport flow table flow Send vport number to the create flow table inner method instead of ignoring the vport argument and sending always 0. 
Fixes: b3ba51498bdd ('net/mlx5: Refactor create flow table method to accept underlay QP') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0e487e8ca634..8f5125ccd8d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace ft_attr.level = level; ft_attr.prio = prio; - return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); + return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); } struct mlx5_flow_table* From 3fece5d676939f42f434c63dfe1bd42d7d94e6f0 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Sun, 9 Apr 2017 17:19:37 +0300 Subject: [PATCH 141/436] net/mlx5: Continue health polling until it is explicitly stopped The issue is that when we get an assert we will stop polling the health and thus we cant enter error state when we have a real health issue. Fixes: fd76ee4da55a ('net/mlx5_core: Fix internal error detection conditions') Signed-off-by: Mohamad Haj Yahia Reviewed-by: Daniel Jurgens Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 44f59b1d6f0f..f27f84ffbc85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -275,10 +275,8 @@ static void poll_health(unsigned long data) struct mlx5_core_health *health = &dev->priv.health; u32 count; - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - mod_timer(&health->timer, get_next_poll_jiffies()); - return; - } + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out; count = ioread32be(health->health_counter); if (count == health->prev) @@ -290,8 +288,6 @@ static void poll_health(unsigned long data) if (health->miss_counter == MAX_MISSES) { dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); print_health_info(dev); - } else { - mod_timer(&health->timer, get_next_poll_jiffies()); } if (in_fatal(dev) && !health->sick) { @@ -305,6 +301,9 @@ static void poll_health(unsigned long data) "new health works are not permitted at this stage\n"); spin_unlock(&health->wq_lock); } + +out: + mod_timer(&health->timer, get_next_poll_jiffies()); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) From f729860a177d097ac44321fb2f7d927a0c54c5a3 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Mon, 8 May 2017 11:46:50 -0500 Subject: [PATCH 142/436] net/mlx5: Remove several module events out of ethtool stats Remove the following module event counters out of ethtool stats. The reason for removing these event counters is that these events do not occur without techinician's intervention. 
module_pwr_budget_exd module_long_range module_no_eeprom module_enforce_part module_unknown_id module_unknown_status module_plug Fixes: bedb7c909c19 ("net/mlx5e: Add port module event counters to ethtool stats") Signed-off-by: Huy Nguyen Reviewed by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 53e4992d6511..f81c3aa60b46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -417,20 +417,13 @@ struct mlx5e_stats { }; static const struct counter_desc mlx5e_pme_status_desc[] = { - { "module_plug", 0 }, { "module_unplug", 8 }, }; static const struct counter_desc mlx5e_pme_error_desc[] = { - { "module_pwr_budget_exd", 0 }, /* power budget exceed */ - { "module_long_range", 8 }, /* long range for non MLNX cable */ - { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ - { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ - { "module_enforce_part", 32 }, /* enforce part number list */ - { "module_unknown_id", 40 }, /* unknown identifier */ - { "module_high_temp", 48 }, /* high temperature */ + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ + { "module_high_temp", 48 }, /* high temperature */ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ - { "module_unknown_status", 64 }, }; #endif /* __MLX5_EN_STATS_H__ */ From c3164d2fc48fd4fa0477ab658b644559c3fe9073 Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Mon, 15 May 2017 14:13:16 +0300 Subject: [PATCH 143/436] net/mlx5e: Added BW check for DIM decision mechanism DIM (Dynamically-tuned Interrupt Moderation) is a mechanism designed for changing the channel interrupt moderation values in order to reduce CPU overhead for all traffic types. Until now only interrupt and packet rate were sampled. We found a scenario on which we get a false indication since a change in DIM caused more aggregation and reduced packet rate while increasing BW. 
We now regard a change as succesfull iff: current_BW > (prev_BW + threshold) or current_BW ~= prev_BW and current_PR > (prev_PR + threshold) or current_BW ~= prev_BW and current_PR ~= prev_PR and current_IR < (prev_IR - threshold) Where BW = Bandwidth, PR = Packet rate and IR = Interrupt rate Improvements (ConnectX-4Lx 25GbE, single RX queue, LRO off) -------------------------------------------------- packet size | before[Mb/s] | after[Mb/s] | gain | 2B | 343.4 | 359.4 | 4.5% | 16B | 2739.7 | 2814.8 | 2.7% | 64B | 9739 | 10185.3 | 4.5% | Fixes: cb3c7fd4f839 ("net/mlx5e: Support adaptive RX coalescing") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 + .../ethernet/mellanox/mlx5/core/en_rx_am.c | 37 ++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2fd044b23875..429da21a583d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -458,12 +458,14 @@ struct mlx5e_mpw_info { struct mlx5e_rx_am_stats { int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ int epms; /* events per msec */ }; struct mlx5e_rx_am_sample { ktime_t time; unsigned int pkt_ctr; + unsigned int byte_ctr; u16 event_ctr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 02dd3a95ed8f..54cdda957f9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) mlx5e_am_step(am); } +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, struct mlx5e_rx_am_stats *prev) { - int diff; - - if (!prev->ppms) - return curr->ppms ? MLX5E_AM_STATS_BETTER : + if (!prev->bpms) + return curr->bpms ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_SAME; - diff = curr->ppms - prev->ppms; - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ - return (diff > 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - if (!prev->epms) - return curr->epms ? MLX5E_AM_STATS_WORSE : - MLX5E_AM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - diff = curr->epms - prev->epms; - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ - return (diff < 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; return MLX5E_AM_STATS_SAME; } @@ -266,6 +265,7 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, { s->time = ktime_get(); s->pkt_ctr = rq->stats.packets; + s->byte_ctr = rq->stats.bytes; s->event_ctr = rq->cq.event_ctr; } @@ -278,12 +278,15 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + unsigned int nbytes = end->byte_ctr - start->byte_ctr; if (!delta_us) return; - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, + delta_us); } void mlx5e_rx_am_work(struct work_struct *work) From 53acd76ce571e3b71f9205f2d49ab285a9f1aad8 Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Mon, 29 May 2017 17:02:55 +0300 Subject: [PATCH 144/436] net/mlx5e: Fix wrong indications in DIM due to counter wraparound DIM (Dynamically-tuned Interrupt Moderation) is a mechanism designed for changing the channel interrupt moderation values in order to reduce CPU overhead for all traffic types. Each iteration of the algorithm, DIM calculates the difference in throughput, packet rate and interrupt rate from last iteration in order to make a decision. DIM relies on counters for each metric. When these counters get to their type's max value they wraparound. In this case the delta between 'end' and 'start' samples is negative and when translated to unsigned integers - very high. This results in a false indication to the algorithm and might result in a wrong decision. The fix calculates the 'distance' between 'end' and 'start' samples in a cyclic way around the relevant type's max value. It can also be viewed as an absolute value around the type's max value instead of around 0. Testing show higher stability in DIM profile selection and no wraparound issues. 
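As a standalone illustration of that cyclic distance (plain userspace C; the COUNTER_GAP name and the sample values are only for demonstration, the driver uses its own BIT_GAP macro shown in the hunk below):

	#include <stdio.h>
	#include <stdint.h>

	/* Wraparound-safe distance between two free-running counters of a
	 * given bit width: add 2^bits to the (possibly negative) difference
	 * and mask back into the counter's range.
	 */
	#define COUNTER_GAP(bits, end, start) \
		((((uint64_t)(end) - (uint64_t)(start)) + (1ULL << (bits))) & \
		 ((1ULL << (bits)) - 1))

	int main(void)
	{
		uint16_t start = 65530;	/* sampled just before the u16 wraps */
		uint16_t end = 5;	/* sampled just after the wraparound */

		/* Plain subtraction promotes to int and reports -65525 ... */
		printf("naive delta : %d\n", (int)end - (int)start);
		/* ... the masked form recovers the true distance: 11 events. */
		printf("cyclic delta: %llu\n",
		       (unsigned long long)COUNTER_GAP(16, end, start));
		return 0;
	}

With 16-bit counters the naive subtraction yields -65525, while the masked form recovers the 11 events that actually occurred across the wraparound, which is exactly the false "worse" indication the patch removes.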
Fixes: cb3c7fd4f839 ("net/mlx5e: Support adaptive RX coalescing") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 10 +++++++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 429da21a583d..944fc1742464 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -463,10 +463,10 @@ struct mlx5e_rx_am_stats { }; struct mlx5e_rx_am_sample { - ktime_t time; - unsigned int pkt_ctr; - unsigned int byte_ctr; - u16 event_ctr; + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; }; struct mlx5e_rx_am { /* Adaptive Moderation */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 54cdda957f9a..acf32fe952cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -270,6 +270,8 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, } #define MLX5E_AM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, struct mlx5e_rx_am_sample *end, @@ -277,8 +279,9 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, { /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); - unsigned int npkts = end->pkt_ctr - start->pkt_ctr; - unsigned int nbytes = end->byte_ctr - start->byte_ctr; + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); if (!delta_us) return; @@ -311,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq) switch (am->state) { case MLX5E_AM_MEASURE_IN_PROGRESS: - nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, + am->start_sample.event_ctr); if (nevents < MLX5E_AM_NEVENTS) break; mlx5e_am_sample(rq, &end_sample); From 91828bd89940e8145f91751a015bc11bc486aad0 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Sun, 28 May 2017 14:47:56 +0300 Subject: [PATCH 145/436] net/mlx5: Enable 4K UAR only when page size is bigger than 4K When the page size isn't bigger than 4K, there is no added value of enabling 4K UAR feature in the Firmware. Modified the condition of enabling the 4K UAR accordingly. Fixes: f502d834950a ("net/mlx5: Activate support for 4K UARs") Signed-off-by: Majd Dibbiny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index af945edfee19..4f577a5abf88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -537,8 +537,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); - /* If the HCA supports 4K UARs use it */ - if (MLX5_CAP_GEN_MAX(dev, uar_4k)) + /* Enable 4K UAR only when HCA supports it and page size is bigger + * than 4K. 
+ */ + if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); From 103a07d4278203d6299798cd74cdc4d209801cac Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Sun, 4 Jun 2017 15:28:23 +0200 Subject: [PATCH 146/436] iio: adc: meson-saradc: fix potential crash in meson_sar_adc_clear_fifo meson_sar_adc_clear_fifo passes a 0 as value-pointer to regmap_read(). In case of the meson-saradc driver this ends up in regmap_mmio_read(), where the value-pointer is de-referenced unconditionally to assign the value which was read. Fix this by passing an actual pointer, even though all we want to do is to discard the value. As a side-effect this fixes a sparse warning ("Using plain integer as NULL pointer") as reported by Paolo Cretaro. Fixes: 3adbf3427330 ("iio: adc: add a driver for the SAR ADC found in Amlogic Meson SoCs") Reported-by: Paolo Cretaro Signed-off-by: Martin Blumenstingl Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/meson_saradc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index dd4190b50df6..6066bbfc42fe 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev) static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); - int count; + unsigned int count, tmp; for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { if (!meson_sar_adc_get_fifo_count(indio_dev)) break; - regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); + regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp); } } From a77c1aafcc906f657d1a0890c1d898be9ee1d5c9 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:43 +0300 Subject: [PATCH 147/436] net: ena: fix rare uncompleted admin command false alarm The current flow to detect admin completion is: while (command_not_completed) { if (timeout) error check_for_completion() sleep() } So in case the sleep took more than the timeout (in case the thread/workqueue was not scheduled due to higher priority task or prolonged VMexit), the driver can detect a stall even if the completion is present. The fix changes the order of this function to first check for completion and only after that check if the timeout expired. Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 08d11cede9c9..e1c2fab6292f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status) static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags; - u32 start_time; + unsigned long flags, timeout; int ret; - start_time = ((u32)jiffies_to_usecs(jiffies)); + timeout = jiffies + ADMIN_CMD_TIMEOUT_US; - while (comp_ctx->status == ENA_CMD_SUBMITTED) { - if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > - ADMIN_CMD_TIMEOUT_US) { + while (1) { + spin_lock_irqsave(&admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + if (comp_ctx->status != ENA_CMD_SUBMITTED) + break; + + if (time_is_before_jiffies(timeout)) { pr_err("Wait for completion (polling) timeout\n"); /* ENA didn't have any completion */ spin_lock_irqsave(&admin_queue->q_lock, flags); @@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c goto err; } - spin_lock_irqsave(&admin_queue->q_lock, flags); - ena_com_handle_admin_completion(admin_queue); - spin_unlock_irqrestore(&admin_queue->q_lock, flags); - msleep(100); } From 418df30f7e9e8f4795fb2f3abf9744e5886df5ca Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:44 +0300 Subject: [PATCH 148/436] net: ena: fix bug that might cause hang after consecutive open/close interface. Fixing a bug that the driver does not unmask the IO interrupts in ndo_open(): occasionally, the MSI-X interrupt (for one or more IO queues) can be masked when ndo_close() was called. If that is followed by ndo open(), then the MSI-X will be still masked so no interrupt will be received by the driver. Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 41 +++++++++++++------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 7c1214d78855..0e3c60c7eccf 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1078,6 +1078,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, rx_ring->per_napi_bytes = 0; } +static inline void ena_unmask_interrupt(struct ena_ring *tx_ring, + struct ena_ring *rx_ring) +{ + struct ena_eth_io_intr_reg intr_reg; + + /* Update intr register: rx intr delay, + * tx intr delay and interrupt unmask + */ + ena_com_update_intr_reg(&intr_reg, + rx_ring->smoothed_interval, + tx_ring->smoothed_interval, + true); + + /* It is a shared MSI-X. + * Tx and Rx CQ have pointer to it. 
+ * So we use one of them to reach the intr reg + */ + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); +} + static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { @@ -1108,7 +1128,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget) { struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring, *rx_ring; - struct ena_eth_io_intr_reg intr_reg; u32 tx_work_done; u32 rx_work_done; @@ -1149,22 +1168,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget) if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) ena_adjust_intr_moderation(rx_ring, tx_ring); - /* Update intr register: rx intr delay, - * tx intr delay and interrupt unmask - */ - ena_com_update_intr_reg(&intr_reg, - rx_ring->smoothed_interval, - tx_ring->smoothed_interval, - true); - - /* It is a shared MSI-X. - * Tx and Rx CQ have pointer to it. - * So we use one of them to reach the intr reg - */ - ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); + ena_unmask_interrupt(tx_ring, rx_ring); } - ena_update_ring_numa_node(tx_ring, rx_ring); ret = rx_work_done; @@ -1485,6 +1491,11 @@ static int ena_up_complete(struct ena_adapter *adapter) ena_napi_enable_all(adapter); + /* Enable completion queues interrupt */ + for (i = 0; i < adapter->num_queues; i++) + ena_unmask_interrupt(&adapter->tx_ring[i], + &adapter->rx_ring[i]); + /* schedule napi in case we had pending packets * from the last time we disable napi */ From 2d2c600a917127f16f179d5a88fc44ba3ed263ed Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:45 +0300 Subject: [PATCH 149/436] net: ena: add missing return when ena_com_get_io_handlers() fails Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0e3c60c7eccf..1e71e89e1e18 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1543,6 +1543,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) "Failed to get TX queue handlers. TX queue num %d rc: %d\n", qid, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; } ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); @@ -1607,6 +1608,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) "Failed to get RX queue handlers. RX queue num %d rc: %d\n", qid, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; } ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); From 661d2b0ccef6a63f48b61105cf7be17403d1db01 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:46 +0300 Subject: [PATCH 150/436] net: ena: fix race condition between submit and completion admin command Bug: "Completion context is occupied" error printout will be noticed in dmesg. This error will cause the admin command to fail, which will lead to an ena_probe() failure or a watchdog reset (depends on which admin command failed). Root cause: __ena_com_submit_admin_cmd() is the function that submits new entries to the admin queue. The function have a check that makes sure the queue is not full and the function does not override any outstanding command. It uses head and tail indexes for this check. 
The head is increased by ena_com_handle_admin_completion() which runs from interrupt context, and the tail index is increased by the submit function (the function is running under ->q_lock, so there is no risk of multithread increment). Each command is associated with a completion context. This context allocated before call to __ena_com_submit_admin_cmd() and freed by ena_com_wait_and_process_admin_cq_interrupts(), right after the command was completed. This can lead to a state where the head was increased, the check passed, but the completion context is still in use. Solution: Use the atomic variable ->outstanding_cmds instead of using the head and the tail indexes. This variable is safe for use since it is bumped in get_comp_ctx() in __ena_com_submit_admin_cmd() and is freed by comp_ctxt_release() Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index e1c2fab6292f..ea60b9e67acb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -232,11 +232,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ - cnt = admin_queue->sq.tail - admin_queue->sq.head; + cnt = atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { - pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", - admin_queue->sq.tail, admin_queue->sq.head, - admin_queue->q_depth); + pr_debug("admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(-ENOSPC); } From 0857d92f71b6cb75281fde913554b2d5436c394b Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:47 +0300 Subject: [PATCH 151/436] net: ena: add missing unmap bars on device removal This patch also change the mapping functions to devm_ functions Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 1e71e89e1e18..4e9fbddd3b47 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2853,6 +2853,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { int release_bars; + if (ena_dev->mem_bar) + devm_iounmap(&pdev->dev, ena_dev->mem_bar); + + devm_iounmap(&pdev->dev, ena_dev->reg_bar); + release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; pci_release_selected_regions(pdev, release_bars); } @@ -2940,8 +2945,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_ena_dev; } - ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), - pci_resource_len(pdev, ENA_REG_BAR)); + ena_dev->reg_bar = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, ENA_REG_BAR), + pci_resource_len(pdev, ENA_REG_BAR)); if (!ena_dev->reg_bar) { dev_err(&pdev->dev, "failed to remap regs bar\n"); rc = -EFAULT; @@ -2961,8 +2967,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), - pci_resource_len(pdev, ENA_MEM_BAR)); + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); if (!ena_dev->mem_bar) { rc = -EFAULT; goto err_device_destroy; From a3af7c18cfe545a711e5df7491b7d6df71eba2ff Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:48 +0300 Subject: [PATCH 152/436] net: ena: fix theoretical Rx hang on low memory systems For the rare case where the device runs out of free rx buffer descriptors (in case of pressure on kernel memory), and the napi handler continuously fail to refill new Rx descriptors until device rx queue totally runs out of all free rx buffers to post incoming packet, leading to a deadlock: * The device won't send interrupts since all the new Rx packets will be dropped. * The napi handler won't try to allocate new Rx descriptors since allocation is part of NAPI that's not being invoked any more The fix involves detecting this scenario and rescheduling NAPI (to refill buffers) by the keepalive/watchdog task. Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 + drivers/net/ethernet/amazon/ena/ena_netdev.c | 55 +++++++++++++++++++ drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 + 3 files changed, 58 insertions(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 67b2338f8fb3..533b2fbdeef1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -94,6 +94,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(dma_mapping_err), ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(rx_copybreak_pkt), + ENA_STAT_RX_ENTRY(empty_rx_ring), }; static const struct ena_stats ena_stats_ena_com_strings[] = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 4e9fbddd3b47..3c366bfbbab1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter) rxr->sgl_size = adapter->max_rx_sgl_size; rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); + rxr->empty_rx_queue = 0; } } @@ -2619,6 +2620,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) adapter->last_monitored_tx_qid = i % adapter->num_queues; } +/* trigger napi schedule after 2 consecutive detections */ +#define EMPTY_RX_REFILL 2 +/* For the rare case where the device runs out of Rx descriptors and the + * napi handler failed to refill new Rx descriptors (due to a lack of memory + * for example). + * This case will lead to a deadlock: + * The device won't send interrupts since all the new Rx packets will be dropped + * The napi handler won't allocate new Rx descriptors so the device will be + * able to send new packets. + * + * This scenario can happen when the kernel's vm.min_free_kbytes is too small. + * It is recommended to have at least 512MB, with a minimum of 128MB for + * constrained environment). 
+ * + * When such a situation is detected - Reschedule napi + */ +static void check_for_empty_rx_ring(struct ena_adapter *adapter) +{ + struct ena_ring *rx_ring; + int i, refill_required; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return; + + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + + for (i = 0; i < adapter->num_queues; i++) { + rx_ring = &adapter->rx_ring[i]; + + refill_required = + ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + if (unlikely(refill_required == (rx_ring->ring_size - 1))) { + rx_ring->empty_rx_queue++; + + if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.empty_rx_ring++; + u64_stats_update_end(&rx_ring->syncp); + + netif_err(adapter, drv, adapter->netdev, + "trigger refill for ring %d\n", i); + + napi_schedule(rx_ring->napi); + rx_ring->empty_rx_queue = 0; + } + } else { + rx_ring->empty_rx_queue = 0; + } + } +} + /* Check for keep alive expiration */ static void check_for_missing_keep_alive(struct ena_adapter *adapter) { @@ -2673,6 +2726,8 @@ static void ena_timer_service(unsigned long data) check_for_missing_tx_completions(adapter); + check_for_empty_rx_ring(adapter); + if (debug_area) ena_dump_stats_to_buf(adapter, debug_area); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 0e22bce6239d..8828f1d6dd22 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -184,6 +184,7 @@ struct ena_stats_rx { u64 dma_mapping_err; u64 bad_desc_num; u64 rx_copybreak_pkt; + u64 empty_rx_ring; }; struct ena_ring { @@ -231,6 +232,7 @@ struct ena_ring { struct ena_stats_tx tx_stats; struct ena_stats_rx rx_stats; }; + int empty_rx_queue; } ____cacheline_aligned; struct ena_stats_dev { From a2cc5198dac102775b21787752a2e0afe44ad311 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:49 +0300 Subject: [PATCH 153/436] net: ena: disable admin msix while working in polling mode Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index ea60b9e67acb..f5b237e0bd60 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -61,6 +61,8 @@ #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF +#define ENA_REGS_ADMIN_INTR_MASK 1 + /*****************************************************************************/ /*****************************************************************************/ /*****************************************************************************/ @@ -1454,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev) void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) { + u32 mask_value = 0; + + if (polling) + mask_value = ENA_REGS_ADMIN_INTR_MASK; + + writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); ena_dev->admin_queue.polling = polling; } From 800c55cb76be6617232ef50a2be29830f3aa8e5c Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:50 +0300 Subject: [PATCH 154/436] net: ena: bug fix in lost tx packets detection mechanism check_for_missing_tx_completions() is called from a timer task and looking for lost tx packets. 
The old implementation accumulate all the lost tx packets and did not check if those packets were retrieved on a later stage. This cause to a situation where the driver reset the device for no reason. Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 - drivers/net/ethernet/amazon/ena/ena_netdev.c | 66 +++++++++++-------- drivers/net/ethernet/amazon/ena/ena_netdev.h | 14 +++- 3 files changed, 50 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 533b2fbdeef1..3ee55e2fd694 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(tx_poll), ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), - ENA_STAT_TX_ENTRY(missing_tx_comp), ENA_STAT_TX_ENTRY(bad_req_id), }; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 3c366bfbbab1..4f16ed38bcf3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1995,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->tx_descs = nb_hw_desc; tx_info->last_jiffies = jiffies; + tx_info->print_once = 0; tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, tx_ring->ring_size); @@ -2564,13 +2565,44 @@ err: "Reset attempt failed. Can not reset the device\n"); } -static void check_for_missing_tx_completions(struct ena_adapter *adapter) +static int check_missing_comp_in_queue(struct ena_adapter *adapter, + struct ena_ring *tx_ring) { struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; + u32 missed_tx = 0; + int i; + + for (i = 0; i < tx_ring->ring_size; i++) { + tx_buf = &tx_ring->tx_buffer_info[i]; + last_jiffies = tx_buf->last_jiffies; + if (unlikely(last_jiffies && + time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { + if (!tx_buf->print_once) + netif_notice(adapter, tx_err, adapter->netdev, + "Found a Tx that wasn't completed on time, qid %d, index %d.\n", + tx_ring->qid, i); + + tx_buf->print_once = 1; + missed_tx++; + + if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { + netif_err(adapter, tx_err, adapter->netdev, + "The number of lost tx completions is above the threshold (%d > %d). 
Reset the device\n", + missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + return -EIO; + } + } + } + + return 0; +} + +static void check_for_missing_tx_completions(struct ena_adapter *adapter) +{ struct ena_ring *tx_ring; - int i, j, budget; - u32 missed_tx; + int i, budget, rc; /* Make sure the driver doesn't turn the device in other process */ smp_rmb(); @@ -2586,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { tx_ring = &adapter->tx_ring[i]; - for (j = 0; j < tx_ring->ring_size; j++) { - tx_buf = &tx_ring->tx_buffer_info[j]; - last_jiffies = tx_buf->last_jiffies; - if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { - netif_notice(adapter, tx_err, adapter->netdev, - "Found a Tx that wasn't completed on time, qid %d, index %d.\n", - tx_ring->qid, j); - - u64_stats_update_begin(&tx_ring->syncp); - missed_tx = tx_ring->tx_stats.missing_tx_comp++; - u64_stats_update_end(&tx_ring->syncp); - - /* Clear last jiffies so the lost buffer won't - * be counted twice. - */ - tx_buf->last_jiffies = 0; - - if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { - netif_err(adapter, tx_err, adapter->netdev, - "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n", - missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); - } - } - } + rc = check_missing_comp_in_queue(adapter, tx_ring); + if (unlikely(rc)) + return; budget--; if (!budget) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 8828f1d6dd22..88b5e5612338 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -146,7 +146,18 @@ struct ena_tx_buffer { u32 tx_descs; /* num of buffers used by this skb */ u32 num_of_bufs; - /* Save the last jiffies to detect missing tx packets */ + + /* Used for detect missing tx packets to limit the number of prints */ + u32 print_once; + /* Save the last jiffies to detect missing tx packets + * + * sets to non zero value on ena_start_xmit and set to zero on + * napi and timer_Service_routine. + * + * while this value is not protected by lock, + * a given packet is not expected to be handled by ena_start_xmit + * and by napi/timer_service at the same time. + */ unsigned long last_jiffies; struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; } ____cacheline_aligned; @@ -170,7 +181,6 @@ struct ena_stats_tx { u64 napi_comp; u64 tx_poll; u64 doorbells; - u64 missing_tx_comp; u64 bad_req_id; }; From e7ff7efae5708513a795e329909ccbe2ac367b1a Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 11 Jun 2017 15:42:51 +0300 Subject: [PATCH 155/436] net: ena: update ena driver to version 1.1.7 Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 88b5e5612338..a4d3d5e21068 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -45,7 +45,7 @@ #define DRV_MODULE_VER_MAJOR 1 #define DRV_MODULE_VER_MINOR 1 -#define DRV_MODULE_VER_SUBMINOR 2 +#define DRV_MODULE_VER_SUBMINOR 7 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION From 4b1f0d33db7d5bf92b5623e3ea2066e2de3999e3 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Sat, 10 Jun 2017 16:30:17 -0400 Subject: [PATCH 156/436] net: ipmr: Fix some mroute forwarding issues in vrf's This patch fixes two issues: 1) When forwarding on *,G mroutes that are in a vrf, the kernel was dropping information about the actual incoming interface when calling ip_mr_forward from ip_mr_input. This caused ip_mr_forward to send the multicast packet back out the incoming interface. Fix this by modifying ip_mr_forward to be handed the correctly resolved dev. 2) When a unresolved cache entry is created we store the incoming skb on the unresolved cache entry and upon mroute resolution from the user space daemon, we attempt to forward the packet. Again we were not resolving to the correct incoming device for a vrf scenario, before calling ip_mr_forward. Fix this by resolving to the correct interface and calling ip_mr_forward with the result. Fixes: e58e41596811 ("net: Enable support for VRF with ipv4 multicast") Signed-off-by: Donald Sharp Acked-by: David Ahern Acked-by: Nikolay Aleksandrov Reviewed-by: Yotam Gigi Signed-off-by: David S. Miller --- net/ipv4/ipmr.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b4f9622ee9f5..8ae425cad818 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id); static void ipmr_free_table(struct mr_table *mrt); static void ip_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc_cache *cache, - int local); + struct net_device *dev, struct sk_buff *skb, + struct mfc_cache *cache, int local); static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, @@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, rtnl_unicast(skb, net, NETLINK_CB(skb).portid); } else { - ip_mr_forward(net, mrt, skb, c, 0); + ip_mr_forward(net, mrt, skb->dev, skb, c, 0); } } } @@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt, /* Queue a packet for resolution. It gets locked cache entry! 
*/ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, - struct sk_buff *skb) + struct sk_buff *skb, struct net_device *dev) { const struct iphdr *iph = ip_hdr(skb); struct mfc_cache *c; @@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, kfree_skb(skb); err = -ENOBUFS; } else { + if (dev) { + skb->dev = dev; + skb->skb_iif = dev->ifindex; + } skb_queue_tail(&c->mfc_un.unres.unresolved, skb); err = 0; } @@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) /* "local" means that we should preserve one skb (for local delivery) */ static void ip_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc_cache *cache, - int local) + struct net_device *dev, struct sk_buff *skb, + struct mfc_cache *cache, int local) { - int true_vifi = ipmr_find_vif(mrt, skb->dev); + int true_vifi = ipmr_find_vif(mrt, dev); int psend = -1; int vif, ct; @@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, } /* Wrong interface: drop packet and (maybe) send PIM assert. */ - if (mrt->vif_table[vif].dev != skb->dev) { - struct net_device *mdev; - - mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev); - if (mdev == skb->dev) - goto forward; - + if (mrt->vif_table[vif].dev != dev) { if (rt_is_output_route(skb_rtable(skb))) { /* It is our own packet, looped back. * Very complicated situation... @@ -2053,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb) read_lock(&mrt_lock); vif = ipmr_find_vif(mrt, dev); if (vif >= 0) { - int err2 = ipmr_cache_unresolved(mrt, vif, skb); + int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev); read_unlock(&mrt_lock); return err2; @@ -2064,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb) } read_lock(&mrt_lock); - ip_mr_forward(net, mrt, skb, cache, local); + ip_mr_forward(net, mrt, dev, skb, cache, local); read_unlock(&mrt_lock); if (local) @@ -2238,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, iph->saddr = saddr; iph->daddr = daddr; iph->version = 0; - err = ipmr_cache_unresolved(mrt, vif, skb2); + err = ipmr_cache_unresolved(mrt, vif, skb2, dev); read_unlock(&mrt_lock); rcu_read_unlock(); return err; From 973a27c7464231282489b2f14581e33cf29024b8 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Fri, 19 May 2017 16:20:35 +0530 Subject: [PATCH 157/436] PM / devfreq: exynos-nocp: Handle return value of clk_prepare_enable clk_prepare_enable() can fail here and we must check its return value. Signed-off-by: Arvind Yadav Signed-off-by: MyungJoo Ham --- drivers/devfreq/event/exynos-nocp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 5c3e7b11e8a6..f6e7956fc91a 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, nocp); - clk_prepare_enable(nocp->clk); + ret = clk_prepare_enable(nocp->clk); + if (ret) { + dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); + return ret; + } pr_info("exynos-nocp: new NoC Probe device registered: %s\n", dev_name(dev)); From 97a6ba5bd0b54544fabe090711fabefc7a703a5f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Fri, 19 May 2017 16:26:04 +0530 Subject: [PATCH 158/436] PM / devfreq: exynos-ppmu: Handle return value of clk_prepare_enable clk_prepare_enable() can fail here and we must check its return value. 
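The same pattern applies to this fix and to the exynos-nocp one before it; a rough standalone sketch (my_probe() and struct my_priv are illustrative placeholders, not the exynos-ppmu sources) of checking and propagating the clk_prepare_enable() result in probe looks like:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct my_priv {		/* illustrative private data */
		struct clk *clk;
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_priv *priv;
		int ret;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(priv->clk))
			return PTR_ERR(priv->clk);

		/* The point of the fix: clk_prepare_enable() can fail,
		 * so check it and propagate the error.
		 */
		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
			return ret;
		}

		platform_set_drvdata(pdev, priv);
		return 0;
	}

A matching clk_disable_unprepare() on later probe failures (and in the remove path) keeps the clock enable count balanced.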
Signed-off-by: Arvind Yadav Signed-off-by: MyungJoo Ham --- drivers/devfreq/event/exynos-ppmu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 9b7350935b73..8f6537aa989b 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev) dev_name(&pdev->dev), desc[i].name); } - clk_prepare_enable(info->ppmu.clk); + ret = clk_prepare_enable(info->ppmu.clk); + if (ret) { + dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); + return ret; + } return 0; } From 6b1355f9ace06a65443c607e1b70872a5f613f80 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 7 Jun 2017 20:12:28 +0200 Subject: [PATCH 159/436] PM / devfreq: exynos-ppmu: Staticize event list The ppmu_events array is accessed only in this compilation unit so it can be made static. Signed-off-by: Krzysztof Kozlowski Acked-by: Chanwoo Choi Signed-off-by: MyungJoo Ham --- drivers/devfreq/event/exynos-ppmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 8f6537aa989b..d96e3dc71cf8 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -44,7 +44,7 @@ struct exynos_ppmu { { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ { "ppmu-event3-"#name, PPMU_PMNCNT3 } -struct __exynos_ppmu_events { +static struct __exynos_ppmu_events { char *name; int id; } ppmu_events[] = { From d758619ba6a5d250914b06b2b923a65e48bdd002 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 10 Jun 2017 11:59:43 +0300 Subject: [PATCH 160/436] drm: dw-hdmi: Fix compilation breakage by selecting REGMAP_MMIO The dw-hdmi driver switched to regmap-mmio, but lacks the dependency in Kconfig. This can result in compilation breakages. Fix it by selecting REGMAP_MMIO. Fixes: 80e2f97968b5 ("drm: bridge: dw-hdmi: Switch to regmap for register access") Reported-by: kbuild test robot Signed-off-by: Laurent Pinchart Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/20170610085943.15788-1-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/bridge/synopsys/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 40d2827a6d19..53e78d092d18 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -1,6 +1,7 @@ config DRM_DW_HDMI tristate select DRM_KMS_HELPER + select REGMAP_MMIO config DRM_DW_HDMI_AHB_AUDIO tristate "Synopsys Designware AHB Audio interface" From 24835611a96e9b41ad57dd5024915106293be7e9 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 10 Jun 2017 13:36:38 +0200 Subject: [PATCH 161/436] mmc: meson-gx: work around broken SDIO with certain WiFi chips There have been reports about SDIO failing with certain WiFi chips in descriptor chain mode. SD / eMMC are working fine. So let's fall back to bounce buffer mode for command SD_IO_RW_EXTENDED. This was reported to fix the error. 
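Condensed into a standalone helper (my_can_use_desc_chain() is illustrative, not the driver's meson_mmc_get_transfer_mode()), the transfer-mode decision the patch below arrives at is roughly:

	#include <linux/types.h>
	#include <linux/mmc/core.h>
	#include <linux/mmc/sdio.h>
	#include <linux/scatterlist.h>

	static bool my_can_use_desc_chain(struct mmc_request *mrq)
	{
		struct mmc_data *data = mrq->data;
		struct scatterlist *sg;
		int i;

		if (!data)
			return false;

		/* Quirk: route SD_IO_RW_EXTENDED (CMD53) through the
		 * bounce buffer, since chaining it breaks some SDIO WiFi
		 * chips.
		 */
		if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
			return false;

		/* Descriptor chaining needs 8-byte aligned sg entries. */
		for_each_sg(data->sg, sg, data->sg_len, i)
			if (sg->offset & 7)
				return false;

		return true;
	}

Anything failing either test falls back to the pre-allocated bounce buffer rather than the descriptor chain.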
Fixes: 79ed05e329c3 "mmc: meson-gx: add support for descriptor chain mode" Signed-off-by: Heiner Kallweit Tested-by: Martin Blumenstingl Signed-off-by: Ulf Hansson --- drivers/mmc/host/meson-gx-mmc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 1842ed341af1..de962c2d5e00 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, int i; bool use_desc_chain_mode = true; + /* + * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been + * reported. For some strange reason this occurs in descriptor + * chain mode only. So let's fall back to bounce buffer mode + * for command SD_IO_RW_EXTENDED. + */ + if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) + return; + for_each_sg(data->sg, sg, data->sg_len, i) /* check for 8 byte alignment */ if (sg->offset & 7) { From ba80aa909c99802c428682c352b0ee0baac0acd3 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 8 Jun 2017 04:51:54 +0000 Subject: [PATCH 162/436] configfs: Fix race between create_link and configfs_rmdir This patch closes a long standing race in configfs between the creation of a new symlink in create_link(), while the symlink target's config_item is being concurrently removed via configfs_rmdir(). This can happen because the symlink target's reference is obtained by config_item_get() in create_link() before the CONFIGFS_USET_DROPPING bit set by configfs_detach_prep() during configfs_rmdir() shutdown is actually checked.. This originally manifested itself on ppc64 on v4.8.y under heavy load using ibmvscsi target ports with Novalink API: [ 7877.289863] rpadlpar_io: slot U8247.22L.212A91A-V1-C8 added [ 7879.893760] ------------[ cut here ]------------ [ 7879.893768] WARNING: CPU: 15 PID: 17585 at ./include/linux/kref.h:46 config_item_get+0x7c/0x90 [configfs] [ 7879.893811] CPU: 15 PID: 17585 Comm: targetcli Tainted: G O 4.8.17-customv2.22 #12 [ 7879.893812] task: c00000018a0d3400 task.stack: c0000001f3b40000 [ 7879.893813] NIP: d000000002c664ec LR: d000000002c60980 CTR: c000000000b70870 [ 7879.893814] REGS: c0000001f3b43810 TRAP: 0700 Tainted: G O (4.8.17-customv2.22) [ 7879.893815] MSR: 8000000000029033 CR: 28222242 XER: 00000000 [ 7879.893820] CFAR: d000000002c664bc SOFTE: 1 GPR00: d000000002c60980 c0000001f3b43a90 d000000002c70908 c0000000fbc06820 GPR04: c0000001ef1bd900 0000000000000004 0000000000000001 0000000000000000 GPR08: 0000000000000000 0000000000000001 d000000002c69560 d000000002c66d80 GPR12: c000000000b70870 c00000000e798700 c0000001f3b43ca0 c0000001d4949d40 GPR16: c00000014637e1c0 0000000000000000 0000000000000000 c0000000f2392940 GPR20: c0000001f3b43b98 0000000000000041 0000000000600000 0000000000000000 GPR24: fffffffffffff000 0000000000000000 d000000002c60be0 c0000001f1dac490 GPR28: 0000000000000004 0000000000000000 c0000001ef1bd900 c0000000f2392940 [ 7879.893839] NIP [d000000002c664ec] config_item_get+0x7c/0x90 [configfs] [ 7879.893841] LR [d000000002c60980] check_perm+0x80/0x2e0 [configfs] [ 7879.893842] Call Trace: [ 7879.893844] [c0000001f3b43ac0] [d000000002c60980] check_perm+0x80/0x2e0 [configfs] [ 7879.893847] [c0000001f3b43b10] [c000000000329770] do_dentry_open+0x2c0/0x460 [ 7879.893849] [c0000001f3b43b70] [c000000000344480] path_openat+0x210/0x1490 [ 7879.893851] [c0000001f3b43c80] [c00000000034708c] do_filp_open+0xfc/0x170 [ 7879.893853] [c0000001f3b43db0] [c00000000032b5bc] do_sys_open+0x1cc/0x390 [ 7879.893856] 
[c0000001f3b43e30] [c000000000009584] system_call+0x38/0xec [ 7879.893856] Instruction dump: [ 7879.893858] 409d0014 38210030 e8010010 7c0803a6 4e800020 3d220000 e94981e0 892a0000 [ 7879.893861] 2f890000 409effe0 39200001 992a0000 <0fe00000> 4bffffd0 60000000 60000000 [ 7879.893866] ---[ end trace 14078f0b3b5ad0aa ]--- To close this race, go ahead and obtain the symlink's target config_item reference only after the existing CONFIGFS_USET_DROPPING check succeeds. This way, if configfs_rmdir() wins create_link() will return -ENONET, and if create_link() wins configfs_rmdir() will return -EBUSY. Reported-by: Bryant G. Ly Tested-by: Bryant G. Ly Signed-off-by: Nicholas Bellinger Signed-off-by: Christoph Hellwig Cc: stable@vger.kernel.org --- fs/configfs/symlink.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index a6ab012a2c6a..c8aabba502f6 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item, ret = -ENOMEM; sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); if (sl) { - sl->sl_target = config_item_get(item); spin_lock(&configfs_dirent_lock); if (target_sd->s_type & CONFIGFS_USET_DROPPING) { spin_unlock(&configfs_dirent_lock); - config_item_put(item); kfree(sl); return -ENOENT; } + sl->sl_target = config_item_get(item); list_add(&sl->sl_list, &target_sd->s_links); spin_unlock(&configfs_dirent_lock); ret = configfs_create_link(sl, parent_item->ci_dentry, From 19e72d3abb63cb16d021a4066ce1a18880509e99 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 9 Feb 2017 17:28:50 -0800 Subject: [PATCH 163/436] configfs: Introduce config_item_get_unless_zero() Signed-off-by: Bart Van Assche [hch: minor style tweak] Signed-off-by: Christoph Hellwig --- fs/configfs/item.c | 8 ++++++++ include/linux/configfs.h | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/configfs/item.c b/fs/configfs/item.c index 8b2a994042dd..a66f6624d899 100644 --- a/fs/configfs/item.c +++ b/fs/configfs/item.c @@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item) } EXPORT_SYMBOL(config_item_get); +struct config_item *config_item_get_unless_zero(struct config_item *item) +{ + if (item && kref_get_unless_zero(&item->ci_kref)) + return item; + return NULL; +} +EXPORT_SYMBOL(config_item_get_unless_zero); + static void config_item_cleanup(struct config_item *item) { struct config_item_type *t = item->ci_type; diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 2319b8c108e8..c96709049683 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item, const char *name, struct config_item_type *type); -extern struct config_item * config_item_get(struct config_item *); +extern struct config_item *config_item_get(struct config_item *); +extern struct config_item *config_item_get_unless_zero(struct config_item *); extern void config_item_put(struct config_item *); struct config_item_type { From 83848fbe7e6af978c080a88c130a67178b1ac0e4 Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Wed, 7 Jun 2017 12:54:58 +0800 Subject: [PATCH 164/436] ACPICA: Tables: Mechanism to handle late stage acpi_get_table() imbalance Considering this case: 1. A program opens a sysfs table file 65535 times, it can increase validation_count and first increment cause the table to be mapped: validation_count = 65535 2. 
AML execution causes "Load" to be executed on the same table, this time it cannot increase validation_count, so validation_count remains: validation_count = 65535 3. The program closes sysfs table file 65535 times, it can decrease validation_count and the last decrement cause the table to be unmapped: validation_count = 0 4. AML code still accessing the loaded table, kernel crash can be observed. To prevent that from happening, add a validation_count threashold. When it is reached, the validation_count can no longer be incremented/decremented to invalidate the table descriptor (means preventing table unmappings) Note that code added in acpi_tb_put_table() is actually a no-op but changes the warning message into a "warn once" one. Lv Zheng. Signed-off-by: Lv Zheng [ rjw: Changelog, comments ] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/tbutils.c | 34 +++++++++++++++++++++++++--------- include/acpi/actbl.h | 14 ++++++++++++++ 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 7abe66505739..0d2e98920069 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -416,9 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc, } } - table_desc->validation_count++; - if (table_desc->validation_count == 0) { - table_desc->validation_count--; + if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) { + table_desc->validation_count++; + + /* + * Detect validation_count overflows to ensure that the warning + * message will only be printed once. + */ + if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) { + ACPI_WARNING((AE_INFO, + "Table %p, Validation count overflows\n", + table_desc)); + } } *out_table = table_desc->pointer; @@ -445,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc) ACPI_FUNCTION_TRACE(acpi_tb_put_table); - if (table_desc->validation_count == 0) { - ACPI_WARNING((AE_INFO, - "Table %p, Validation count is zero before decrement\n", - table_desc)); - return_VOID; + if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) { + table_desc->validation_count--; + + /* + * Detect validation_count underflows to ensure that the warning + * message will only be printed once. + */ + if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) { + ACPI_WARNING((AE_INFO, + "Table %p, Validation count underflows\n", + table_desc)); + return_VOID; + } } - table_desc->validation_count--; if (table_desc->validation_count == 0) { diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index d92543f3bbfd..bdc55c0da19c 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -374,6 +374,20 @@ struct acpi_table_desc { u16 validation_count; }; +/* + * Maximum value of the validation_count field in struct acpi_table_desc. + * When reached, validation_count cannot be changed any more and the table will + * be permanently regarded as validated. + * + * This is to prevent situations in which unbalanced table get/put operations + * may cause premature table unmapping in the OS to happen. + * + * The maximum validation count can be defined to any value, but should be + * greater than the maximum number of OS early stage mapping slots to avoid + * leaking early stage table mappings to the late stage. 
+ */ +#define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX + /* Masks for Flags field above */ #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */ From ff0a6d6f932ff4b9eb8e8140f98cc1cf763d0d78 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 12 Jun 2017 14:16:16 +0200 Subject: [PATCH 165/436] Revert "cpufreq: schedutil: Reduce frequencies slower" Revert commit 39b64aa1c007 (cpufreq: schedutil: Reduce frequencies slower) that introduced unintentional changes in behavior leading to adverse effects on some systems. Reported-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- kernel/sched/cpufreq_schedutil.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 76877a62b5fa..8773d1efdfab 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, if (sg_policy->next_freq == next_freq) return; - if (sg_policy->next_freq > next_freq) - next_freq = (sg_policy->next_freq + next_freq) >> 1; - sg_policy->next_freq = next_freq; sg_policy->last_freq_update_time = time; From b8e11f7d2791bd9320be1c6e772a60b2aa093e45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Wilczy=C5=84ski?= Date: Sun, 11 Jun 2017 17:28:39 +0900 Subject: [PATCH 166/436] cpufreq: conservative: Allow down_threshold to take values from 1 to 10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 27ed3cd2ebf4 (cpufreq: conservative: Fix the logic in frequency decrease checking) removed the 10 point substraction when comparing the load against down_threshold but did not remove the related limit for the down_threshold value. As a result, down_threshold lower than 11 is not allowed even though values from 1 to 10 do work correctly too. The comment ("cannot be lower than 11 otherwise freq will not fall") also does not apply after removing the substraction. For this reason, allow down_threshold to take any value from 1 to 99 and fix the related comment. Fixes: 27ed3cd2ebf4 (cpufreq: conservative: Fix the logic in frequency decrease checking) Signed-off-by: Tomasz Wilczyński Acked-by: Viresh Kumar Cc: 3.10+ # 3.10+ Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq_conservative.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 992f7c20760f..88220ff3e1c2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set, int ret; ret = sscanf(buf, "%u", &input); - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || + /* cannot be lower than 1 otherwise freq will not fall */ + if (ret != 1 || input < 1 || input > 100 || input >= dbs_data->up_threshold) return -EINVAL; From b2cdd8e1b54849477a32d820acc2e87828a38f3d Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 11 Jun 2017 14:28:54 +0200 Subject: [PATCH 167/436] cpuidle: dt: Add missing 'of_node_put()' 'of_node_put()' should be called on pointer returned by 'of_parse_phandle()' when done. In this function this is done in all path except this 'continue', so add it. Fixes: 97735da074fd (drivers: cpuidle: Add status property to ARM idle states) Signed-off-by: Christophe Jaillet Signed-off-by: Rafael J. 
Wysocki --- drivers/cpuidle/dt_idle_states.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index ffca4fc0061d..ae8eb0359889 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, if (!state_node) break; - if (!of_device_is_available(state_node)) + if (!of_device_is_available(state_node)) { + of_node_put(state_node); continue; + } if (!idle_state_valid(state_node, i, cpumask)) { pr_warn("%s idle state not valid, bailing out\n", From 3db1200ca21f3c63c9044185dc5762ef996848cb Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Fri, 9 Jun 2017 17:26:32 -0700 Subject: [PATCH 168/436] clocksource/drivers/arm_arch_timer: Fix read and iounmap of incorrect variable Fix boot warning 'Trying to vfree() nonexistent vm area' from arch_timer_mem_of_init(). Refactored code attempts to read and iounmap using address frame instead of address ioremap(frame->cntbase). Fixes: c389d701dfb70 ("clocksource: arm_arch_timer: split MMIO timer probing.") Signed-off-by: Frank Rowand Reviewed-by: Fu Wei Acked-by: Marc Zyngier Signed-off-by: Daniel Lezcano --- drivers/clocksource/arm_arch_timer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 4bed671e490e..8b5c30062d99 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) return 0; } - rate = readl_relaxed(frame + CNTFRQ); + rate = readl_relaxed(base + CNTFRQ); - iounmap(frame); + iounmap(base); return rate; } From 3500cd73dff48f28f4ba80c171c4c80034d40f76 Mon Sep 17 00:00:00 2001 From: Christian Perle Date: Mon, 12 Jun 2017 10:06:57 +0200 Subject: [PATCH 169/436] proc: snmp6: Use correct type in memset Reading /proc/net/snmp6 yields bogus values on 32 bit kernels. Use "u64" instead of "unsigned long" in sizeof(). Fixes: 4a4857b1c81e ("proc: Reduce cache miss in snmp6_seq_show") Signed-off-by: Christian Perle Signed-off-by: David S. Miller --- net/ipv6/proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index cc8e3ae9ca73..e88bcb8ff0fd 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib, u64 buff64[SNMP_MIB_MAX]; int i; - memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX); + memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX); snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); for (i = 0; itemlist[i].name; i++) From b72eb8435b25be3a1880264cf32ac91e626ba5ba Mon Sep 17 00:00:00 2001 From: YD Tseng Date: Fri, 9 Jun 2017 14:48:40 +0300 Subject: [PATCH 170/436] usb: xhci: Fix USB 3.1 supported protocol parsing xHCI host controllers can have both USB 3.1 and 3.0 extended speed protocol lists. If the USB3.1 speed is parsed first and 3.0 second then the minor revision supported will be overwritten by the 3.0 speeds and the USB3 roothub will only show support for USB 3.0 speeds. This was the case with a xhci controller with the supported protocol capability listed below. In xhci-mem.c, the USB 3.1 speed is parsed first, the min_rev of usb3_rhub is set as 0x10. And then USB 3.0 is parsed. However, the min_rev of usb3_rhub will be changed to 0x00. 
If a USB 3.1 device is connected behind this host controller, lsusb reports only 5G speed for it. 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 00 01 08 00 00 00 00 00 40 00 00 00 00 00 00 00 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 20 02 08 10 03 55 53 42 20 01 02 00 00 00 00 00 00 //USB 3.1 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 40 02 08 00 03 55 53 42 20 03 06 00 00 00 00 00 00 //USB 3.0 50 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 60 02 08 00 02 55 53 42 20 09 0E 19 00 00 00 00 00 //USB 2.0 70 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 This patch fixes the issue by only overwriting the minor revision if it is higher than the existing one. [reword commit message -Mathias] Cc: Signed-off-by: YD Tseng Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mem.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1f1687e888d6..fddf2731f798 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, { u32 temp, port_offset, port_count; int i; - u8 major_revision; + u8 major_revision, minor_revision; struct xhci_hub *rhub; temp = readl(addr); major_revision = XHCI_EXT_PORT_MAJOR(temp); + minor_revision = XHCI_EXT_PORT_MINOR(temp); if (major_revision == 0x03) { rhub = &xhci->usb3_rhub; @@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, return; } rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); - rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); + + if (rhub->min_rev < minor_revision) + rhub->min_rev = minor_revision; /* Port offset and count in the third dword, see section 7.2 */ temp = readl(addr + 2); From d2f48f05cd2a2a0a708fbfa45f1a00a87660d937 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Fri, 9 Jun 2017 14:48:41 +0300 Subject: [PATCH 171/436] usb: xhci: ASMedia ASM1042A chipset needs short TX quirk When plugging in a USB webcam I see the following message: [106385.615559] xhci_hcd 0000:04:00.0: WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk? [106390.583860] handle_tx_event: 913 callbacks suppressed With this patch applied, I get no more printing of this message. Cc: Signed-off-by: Corentin Labbe Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fcf1f3f63e7a..1bcf971141c0 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1042) xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x1142) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; From 94114c367553f3301747e47f6947cabde947575f Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 7 Jun 2017 23:36:03 -0700 Subject: [PATCH 172/436] tick/broadcast: Make tick_broadcast_setup_oneshot() static This function isn't used outside of tick-broadcast.c, so let's mark it static.
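The pattern applied here, a static definition in the one file that uses it plus an inline stub for configurations that compile the feature out, can be sketched in plain C (names are hypothetical; abort() stands in for the BUG() stub):

#include <stdio.h>
#include <stdlib.h>

#define CONFIG_ONESHOT 1

#ifdef CONFIG_ONESHOT
static void setup_oneshot(int dev)
{
    printf("one-shot mode set up for device %d\n", dev);
}
#else
static inline void setup_oneshot(int dev)
{
    (void)dev;
    abort(); /* never reached when one-shot support is compiled out */
}
#endif

int main(void)
{
    setup_oneshot(3);
    return 0;
}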
Signed-off-by: Stephen Boyd Link: http://lkml.kernel.org/r/20170608063603.13276-1-sboyd@codeaurora.org Signed-off-by: Thomas Gleixner --- kernel/time/tick-broadcast.c | 4 +++- kernel/time/tick-internal.h | 2 -- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 987e496bb51a..b398c2ea69b2 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -37,9 +37,11 @@ static int tick_broadcast_forced; static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); #ifdef CONFIG_TICK_ONESHOT +static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); static void tick_broadcast_clear_oneshot(int cpu); static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); #else +static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } static inline void tick_broadcast_clear_oneshot(int cpu) { } static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } #endif @@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, /** * tick_broadcast_setup_oneshot - setup the broadcast device */ -void tick_broadcast_setup_oneshot(struct clock_event_device *bc) +static void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { int cpu = smp_processor_id(); diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index f738251000fe..be0ac01f2e12 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } /* Functions related to oneshot broadcasting */ #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) -extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); extern void tick_broadcast_switch_to_oneshot(void); extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); extern int tick_broadcast_oneshot_active(void); @@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void); bool tick_broadcast_oneshot_available(void); extern struct cpumask *tick_get_broadcast_oneshot_mask(void); #else /* !(BROADCAST && ONESHOT): */ -static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } static inline void tick_broadcast_switch_to_oneshot(void) { } static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } static inline int tick_broadcast_oneshot_active(void) { return 0; } From 8a524f803a3e0290cdba6d373361b2cef9752934 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Jun 2017 13:52:46 +0200 Subject: [PATCH 173/436] x86/debug: Handle early WARN_ONs proper Hans managed to trigger a WARN very early in the boot which killed his (Virtual) box. The reason is that the recent rework of WARN() to use UD0 forgot to add the fixup_bug() call to early_fixup_exception(). As a result the kernel does not handle the WARN_ON injected UD0 exception and panics. Add the missing fixup call, so early UD's injected by WARN() get handled. 
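The handler ordering described above can be illustrated with a small user-space sketch (hypothetical stand-ins for fixup_exception() and fixup_bug(); only the "try the exception table, then the bug table, else panic" flow comes from the patch):

#include <stdbool.h>
#include <stdio.h>

#define TRAP_UD 6 /* invalid-opcode trap number on x86 */

static bool try_extable_fixup(int trapnr)
{
    (void)trapnr;
    return false; /* no exception-table entry for this fault */
}

static bool try_bug_fixup(int trapnr)
{
    return trapnr == TRAP_UD; /* WARN()'s UD0 is handled here */
}

static void early_exception(int trapnr)
{
    if (try_extable_fixup(trapnr))
        return;
    if (try_bug_fixup(trapnr)) /* the step this patch adds */
        return;
    printf("PANIC: unhandled early exception %d\n", trapnr);
}

int main(void)
{
    early_exception(TRAP_UD);
    return 0;
}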
Fixes: 9a93848fe787 ("x86/debug: Implement __WARN() using UD0") Reported-and-tested-by: Hans de Goede Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Linus Torvalds Cc: Frank Mehnert Cc: Hans de Goede Cc: Michael Thayer Link: http://lkml.kernel.org/r/20170612180108.w4vgu2ckucmllf3a@hirez.programming.kicks-ass.net --- arch/x86/include/asm/extable.h | 1 + arch/x86/kernel/traps.c | 2 +- arch/x86/mm/extable.c | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h index b8ad261d11dc..c66d19e3c23e 100644 --- a/arch/x86/include/asm/extable.h +++ b/arch/x86/include/asm/extable.h @@ -29,6 +29,7 @@ struct pt_regs; } while (0) extern int fixup_exception(struct pt_regs *regs, int trapnr); +extern int fixup_bug(struct pt_regs *regs, int trapnr); extern bool ex_has_fault_handler(unsigned long ip); extern void early_fixup_exception(struct pt_regs *regs, int trapnr); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 3995d3a777d4..bf54309b85da 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr) return ud == INSN_UD0 || ud == INSN_UD2; } -static int fixup_bug(struct pt_regs *regs, int trapnr) +int fixup_bug(struct pt_regs *regs, int trapnr) { if (trapnr != X86_TRAP_UD) return 0; diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 35ea061010a1..0ea8afcb929c 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) if (fixup_exception(regs, trapnr)) return; + if (fixup_bug(regs, trapnr)) + return; + fail: early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", (unsigned)trapnr, (unsigned long)regs->cs, regs->ip, From 675c8da049fd6556eb2d6cdd745fe812752f07a8 Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Mon, 12 Jun 2017 15:06:26 -0400 Subject: [PATCH 174/436] hsr: fix incorrect warning When HSR interface is setup using ip link command, an annoying warning appears with the trace as below:- [ 203.019828] hsr_get_node: Non-HSR frame [ 203.019833] Modules linked in: [ 203.019848] CPU: 0 PID: 158 Comm: sd-resolve Tainted: G W 4.12.0-rc3-00052-g9fa6bf70 #2 [ 203.019853] Hardware name: Generic DRA74X (Flattened Device Tree) [ 203.019869] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 203.019880] [] (show_stack) from [] (dump_stack+0xac/0xe0) [ 203.019894] [] (dump_stack) from [] (__warn+0xd8/0x104) [ 203.019907] [] (__warn) from [] (warn_slowpath_fmt+0x34/0x44) root@am57xx-evm:~# [ 203.019921] [] (warn_slowpath_fmt) from [] (hsr_get_node+0x148/0x170) [ 203.019932] [] (hsr_get_node) from [] (hsr_forward_skb+0x110/0x7c0) [ 203.019942] [] (hsr_forward_skb) from [] (hsr_dev_xmit+0x2c/0x34) [ 203.019954] [] (hsr_dev_xmit) from [] (dev_hard_start_xmit+0xc4/0x3bc) [ 203.019963] [] (dev_hard_start_xmit) from [] (__dev_queue_xmit+0x7c4/0x98c) [ 203.019974] [] (__dev_queue_xmit) from [] (ip6_finish_output2+0x330/0xc1c) [ 203.019983] [] (ip6_finish_output2) from [] (ip6_output+0x58/0x454) [ 203.019994] [] (ip6_output) from [] (mld_sendpack+0x420/0x744) As this is an expected path to hsr_get_node() with frame coming from the master interface, add a check to ensure packet is not from the master port and then warn. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- net/hsr/hsr_forward.c | 3 +-- net/hsr/hsr_framereg.c | 9 +++++++-- net/hsr/hsr_framereg.h | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 4ebe2aa3e7d3..04b5450c5a55 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame, unsigned long irqflags; frame->is_supervision = is_supervision_frame(port->hsr, skb); - frame->node_src = hsr_get_node(&port->hsr->node_db, skb, - frame->is_supervision); + frame->node_src = hsr_get_node(port, skb, frame->is_supervision); if (frame->node_src == NULL) return -1; /* Unknown node and !is_supervision, or no mem */ diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 7ea925816f79..284a9b820df8 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], /* Get the hsr_node from which 'skb' was sent. */ -struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, +struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, bool is_sup) { + struct list_head *node_db = &port->hsr->node_db; struct hsr_node *node; struct ethhdr *ethhdr; u16 seq_out; @@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, */ seq_out = hsr_get_skb_sequence_nr(skb) - 1; } else { - WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); + /* this is called also for frames from master port and + * so warn only for non master ports + */ + if (port->type != HSR_PT_MASTER) + WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); seq_out = HSR_SEQNR_START; } diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 438b40f98f5a..4e04f0e868e9 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -18,7 +18,7 @@ struct hsr_node; struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], u16 seq_out); -struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, +struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, bool is_sup); void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, struct hsr_port *port); From 98dbf5af4fdd142f6184dbb4e4164a8d1850d526 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 12 Jun 2017 15:43:03 -0500 Subject: [PATCH 175/436] PCI: endpoint: Select CRC32 to fix test build error The PCI endpoint test driver uses crc32_le() so it should select CRC32. Fixes this build error (when CRC32=m): drivers/built-in.o: In function `pci_epf_test_cmd_handler': pci-epf-test.c:(.text+0x2d98d): undefined reference to `crc32_le' Fixes: 349e7a85b25f ("PCI: endpoint: functions: Add an EP function to test PCI") Reported-by: kbuild test robot Signed-off-by: Randy Dunlap Signed-off-by: Bjorn Helgaas Acked-by: Kishon Vijay Abraham I --- drivers/pci/endpoint/functions/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig index 175edad42d2f..2942066607e0 100644 --- a/drivers/pci/endpoint/functions/Kconfig +++ b/drivers/pci/endpoint/functions/Kconfig @@ -5,6 +5,7 @@ config PCI_EPF_TEST tristate "PCI Endpoint Test driver" depends on PCI_ENDPOINT + select CRC32 help Enable this configuration option to enable the test driver for PCI Endpoint. 
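For reference, crc32_le() belongs to the reflected CRC-32 family (polynomial 0xEDB88320). A bitwise user-space sketch of that computation, with the seed and final inversion written out explicitly at the call site, might look like this (illustrative only, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise reflected CRC-32; no pre/post conditioning here, the caller
 * picks the seed and inverts the result if desired. */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
    }
    return crc;
}

int main(void)
{
    const char *msg = "123456789";
    /* seeding with ~0 and inverting at the end gives the familiar
     * zlib-style CRC-32 check value 0xCBF43926 for "123456789" */
    uint32_t crc = ~crc32_le_bitwise(~0u, (const uint8_t *)msg, strlen(msg));

    printf("crc32 = 0x%08lX\n", (unsigned long)crc);
    return 0;
}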
From 2ad50606f847a902303a5364b7cad64bdd6246f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ondrej=20Mosn=C3=A1=C4=8Dek?= Date: Mon, 5 Jun 2017 17:52:39 +0200 Subject: [PATCH 176/436] dm integrity: reject mappings too large for device dm-integrity would successfully create mappings with the number of sectors greater than the provided data sector count. Attempts to read sectors of this mapping that were beyond the provided data sector count would then yield run-time messages of the form "device-mapper: integrity: Too big sector number: ...". Fix this by emitting an error when the requested mapping size is bigger than the provided data sector count. Signed-off-by: Ondrej Mosnacek Acked-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-integrity.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 7910bfe50da4..4ab10cf718c9 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3040,6 +3040,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "The device is too small"; goto bad; } + if (ti->len > ic->provided_data_sectors) { + r = -EINVAL; + ti->error = "Not enough provided sectors for requested mapping size"; + goto bad; + } if (!buffer_sectors) buffer_sectors = 1; From fa07ab72cbb0d843429e61bf179308aed6cbe0dd Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 11 Jun 2017 00:38:36 +0200 Subject: [PATCH 177/436] genirq: Release resources in __setup_irq() error path In case __irq_set_trigger() fails the resources requested via irq_request_resources() are not released. Add the missing release call into the error handling path. Fixes: c1bacbae8192 ("genirq: Provide irq_request/release_resources chip callbacks") Signed-off-by: Heiner Kallweit Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/655538f5-cb20-a892-ff15-fbd2dd1fa4ec@gmail.com --- kernel/irq/manage.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 070be980c37a..425170d4439b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1312,8 +1312,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) ret = __irq_set_trigger(desc, new->flags & IRQF_TRIGGER_MASK); - if (ret) + if (ret) { + irq_release_resources(desc); goto out_mask; + } } desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ From 6964e53f55837b0c49ed60d36656d2e0ee4fc27b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 12 Jun 2017 15:38:36 -0700 Subject: [PATCH 178/436] i40e: fix handling of HW ATR eviction A recent commit to refactor the driver and remove the hw_disabled_flags field accidentally introduced two regressions. First, we overwrote pf->flags which removed various key flags including the MSI-X settings. Additionally, it was intended that we have now two flags, HW_ATR_EVICT_CAPABLE and HW_ATR_EVICT_ENABLED, but this was not done, and we accidentally were mis-using HW_ATR_EVICT_CAPABLE everywhere. This patch adds the missing piece, HW_ATR_EVICT_ENABLED, and safely updates pf->flags instead of overwriting it. Without this patch we will have many problems including disabling MSI-X support, and we'll attempt to use HW ATR eviction on devices which do not support it. Fixes: 47994c119a36 ("i40e: remove hw_disabled_flags in favor of using separate flag bits", 2017-04-19) Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4 ++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 7 ++++--- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4 ++-- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cdde3cc28fb5..44d9610f7a15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -399,6 +399,7 @@ struct i40e_pf { #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) +#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7a8eb486b9ea..894c8e57ba00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), - I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), + I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), }; @@ -4092,7 +4092,7 @@ flags_complete: /* Only allow ATR evict on hardware that is capable of handling it */ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { u16 sw_flags = 0, valid_flags = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 150caf6ca2b4..a7a4b28b4144 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; - } else { - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } + /* Enable HW ATR eviction if possible */ + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; + pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index cd894f4023b1..77115c25d96f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2341,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. 
*/ @@ -2403,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); From ca8efa1df1d15a1795a2da57f9f6aada6ed6b946 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 6 Jun 2017 16:47:22 +1000 Subject: [PATCH 179/436] KVM: PPC: Book3S HV: Context-switch EBB registers properly This adds code to save the values of three SPRs (special-purpose registers) used by userspace to control event-based branches (EBBs), which are essentially interrupts that get delivered directly to userspace. These registers are loaded up with guest values when entering the guest, and their values are saved when exiting the guest, but we were not saving the host values and restoring them before going back to userspace. On POWER8 this would only affect userspace programs which explicitly request the use of EBBs and also use the KVM_RUN ioctl, since the only source of EBBs on POWER8 is the PMU, and there is an explicit enable bit in the PMU registers (and those PMU registers do get properly context-switched between host and guest). On POWER9 there is provision for externally-generated EBBs, and these are not subject to the control in the PMU registers. Since these registers only affect userspace, we can save them when we first come in from userspace and restore them before returning to userspace, rather than saving/restoring the host values on every guest entry/exit. Similarly, we don't need to worry about their values on offline secondary threads since they execute in the context of the idle task, which never executes in userspace. 
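The save/restore placement described above can be sketched in plain C (hypothetical accessors standing in for mfspr()/mtspr(); the point is that userspace-visible state is saved once on the way in and restored once on the way out, not on every guest entry/exit):

#include <stdio.h>

static unsigned long sprs[3] = { 0x11, 0x22, 0x33 };

static unsigned long mfspr(int n)         { return sprs[n]; }
static void mtspr(int n, unsigned long v) { sprs[n] = v; }

static void run_guest_until_userspace_exit(void)
{
    /* the guest loads its own values while it runs */
    mtspr(0, 0xaa);
    mtspr(1, 0xbb);
    mtspr(2, 0xcc);
}

int main(void)
{
    unsigned long saved[3];
    int i;

    /* save the userspace values once, when coming in from userspace */
    for (i = 0; i < 3; i++)
        saved[i] = mfspr(i);

    run_guest_until_userspace_exit();

    /* restore them once, just before returning to userspace */
    for (i = 0; i < 3; i++)
        mtspr(i, saved[i]);

    printf("%lx %lx %lx\n", sprs[0], sprs[1], sprs[2]); /* 11 22 33 */
    return 0;
}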
Fixes: b005255e12a3 ("KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs", 2014-01-08) Cc: stable@vger.kernel.org # v3.14+ Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 42b7a4fd57d9..400a5992b121 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2907,6 +2907,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; int srcu_idx; + unsigned long ebb_regs[3] = {}; /* shut up GCC */ if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -2934,6 +2935,13 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) flush_all_to_thread(current); + /* Save userspace EBB register values */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + ebb_regs[0] = mfspr(SPRN_EBBHR); + ebb_regs[1] = mfspr(SPRN_EBBRR); + ebb_regs[2] = mfspr(SPRN_BESCR); + } + vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; @@ -2960,6 +2968,13 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) } } while (is_kvmppc_resume_guest(r)); + /* Restore userspace EBB register values */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + mtspr(SPRN_EBBHR, ebb_regs[0]); + mtspr(SPRN_EBBRR, ebb_regs[1]); + mtspr(SPRN_BESCR, ebb_regs[2]); + } + out: vcpu->arch.state = KVMPPC_VCPU_NOTREADY; atomic_dec(&vcpu->kvm->arch.vcpus_running); From d9ee35acfabbc909c3be4360cd5655a006628b2e Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 12 Jun 2017 09:21:30 +0200 Subject: [PATCH 180/436] x86/mm: Disable 1GB direct mappings when disabling 2MB mappings The kmemleak and debug_pagealloc features both disable using huge pages for direct mappings so they can do cpa() on page level granularity in any context. However they only do that for 2MB pages, which means 1GB pages can still be used if the CPU supports it, unless disabled by a boot param, which is non-obvious. Disable also 1GB pages when disabling 2MB pages. Signed-off-by: Vlastimil Babka Cc: Christian Borntraeger Cc: Linus Torvalds Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vegard Nossum Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/2be70c78-6130-855d-3dfa-d87bd1dd4fda@suse.cz Signed-off-by: Ingo Molnar --- arch/x86/mm/init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index cbc87ea98751..9b3f9fa5b283 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -161,16 +161,16 @@ static int page_size_mask; static void __init probe_page_size_mask(void) { -#if !defined(CONFIG_KMEMCHECK) /* * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will * use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. 
*/ - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) page_size_mask |= 1 << PG_LEVEL_2M; -#endif + else + direct_gbpages = 0; /* Enable PSE if available */ if (boot_cpu_has(X86_FEATURE_PSE)) From 023f108dcc187e34ef864bf10ed966cf25e14e2a Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Wed, 7 Jun 2017 16:48:19 -0400 Subject: [PATCH 181/436] selinux: fix double free in selinux_parse_opts_str() This patch is based on a discussion generated by an earlier patch from Tetsuo Handa: * https://marc.info/?t=149035659300001&r=1&w=2 The double free problem involves the mnt_opts field of the security_mnt_opts struct, selinux_parse_opts_str() frees the memory on error, but doesn't set the field to NULL so if the caller later attempts to call security_free_mnt_opts() we trigger the problem. In order to play it safe we change selinux_parse_opts_str() to call security_free_mnt_opts() on error instead of free'ing the memory directly. This should ensure that everything is handled correctly, regardless of what the caller may do. Fixes: e0007529893c1c06 ("LSM/SELinux: Interfaces to allow FS to control mount options") Cc: stable@vger.kernel.org Cc: Tetsuo Handa Reported-by: Dmitry Vyukov Signed-off-by: Paul Moore Signed-off-by: James Morris --- security/selinux/hooks.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e67a526d1f30..819fd6858b49 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1106,10 +1106,8 @@ static int selinux_parse_opts_str(char *options, opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_KERNEL); - if (!opts->mnt_opts_flags) { - kfree(opts->mnt_opts); + if (!opts->mnt_opts_flags) goto out_err; - } if (fscontext) { opts->mnt_opts[num_mnt_opts] = fscontext; @@ -1132,6 +1130,7 @@ static int selinux_parse_opts_str(char *options, return 0; out_err: + security_free_mnt_opts(opts); kfree(context); kfree(defcontext); kfree(fscontext); From 9a775e0308b575e3a17c66a586ed049b07f48199 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 8 Jun 2017 17:40:01 +0300 Subject: [PATCH 182/436] drm/i915: Fix scaling check for 90/270 degree plane rotation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Starting from commit b63a16f6cd89 ("drm/i915: Compute display surface offset in the plane check hook for SKL+") we've already rotated the src coordinates by 270 degrees by the time we check if a scaler is needed or not, so we must not account for the rotation a second time. Previously we did these steps in the opposite order and hence the scaler check had to deal with rotation itself. The double rotation handling causes us to enable a scaler pretty much every time 90/270 degree plane rotation is requested, leading to fuzzier fonts and whatnot. 
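A small user-space sketch of the corrected check (hypothetical function, mirroring the hunk below): because the source size is already rotated, deciding whether a scaler is needed is a plain size comparison, with no second swap.

#include <stdbool.h>
#include <stdio.h>

/* src_w/src_h are assumed to be pre-rotated for 90/270, as the plane
 * check hook now does, so they must not be swapped again here. */
static bool need_scaling(int src_w, int src_h, int dst_w, int dst_h)
{
    return src_w != dst_w || src_h != dst_h;
}

int main(void)
{
    /* a 1080x1920 source already rotated for a 1080x1920 scanout: no scaler */
    printf("%d\n", need_scaling(1080, 1920, 1080, 1920));
    /* a genuine size mismatch still requests one */
    printf("%d\n", need_scaling(1920, 1080, 1280, 720));
    return 0;
}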
v2: s/unsigned/unsigned int/ to appease checkpatch v3: s/DRM_ROTATE_0/DRM_MODE_ROTATE_0/ Cc: stable@vger.kernel.org Cc: Tvrtko Ursulin Reported-by: Tvrtko Ursulin Tested-by: Tvrtko Ursulin Fixes: b63a16f6cd89 ("drm/i915: Compute display surface offset in the plane check hook for SKL+") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170331180056.14086-2-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst (cherry picked from commit d96a7d2adb040a67e163a82dad6316f9f572498a) Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20170608144002.1605-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_display.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 569717a12723..96b0b01677e2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4598,7 +4598,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) static int skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, - unsigned scaler_user, int *scaler_id, unsigned int rotation, + unsigned int scaler_user, int *scaler_id, int src_w, int src_h, int dst_w, int dst_h) { struct intel_crtc_scaler_state *scaler_state = @@ -4607,9 +4607,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, to_intel_crtc(crtc_state->base.crtc); int need_scaling; - need_scaling = drm_rotation_90_or_270(rotation) ? - (src_h != dst_w || src_w != dst_h): - (src_w != dst_w || src_h != dst_h); + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ + need_scaling = src_w != dst_w || src_h != dst_h; /* * if plane is being disabled or scaler is no more required or force detach @@ -4671,7 +4674,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, DRM_ROTATE_0, + &state->scaler_state.scaler_id, state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); } @@ -4700,7 +4703,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), &plane_state->scaler_id, - plane_state->base.rotation, drm_rect_width(&plane_state->base.src) >> 16, drm_rect_height(&plane_state->base.src) >> 16, drm_rect_width(&plane_state->base.dst), From 1c2d6bbf0433ddf7c978ac5f2bd582e9e7d34687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 8 Jun 2017 17:40:02 +0300 Subject: [PATCH 183/436] drm/i915: Fix SKL+ watermarks for 90/270 rotation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skl_check_plane_surface() already rotates the clipped plane source coordinates to match the scanout direction because that's the way the GTT mapping is set up. Thus we no longer need to rotate the coordinates in the watermark code. For cursors we use the non-clipped coordinates which are not rotated appropriately, but that doesn't actually matter since cursors don't even support 90/270 degree rotation. 
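A user-space sketch of how the watermark code picks its dimensions after this change (hypothetical helper; the 16.16 fixed-point source rectangle and the cursor special case follow the hunks below):

#include <stdbool.h>
#include <stdio.h>

/* Plane source rectangles are 16.16 fixed point and already rotated for
 * 90/270; cursor sizes are whole pixels and support only 0/180 rotation. */
static void wm_plane_size(bool is_cursor,
                          unsigned int src_w_fp, unsigned int src_h_fp,
                          unsigned int crtc_w, unsigned int crtc_h,
                          unsigned int *w, unsigned int *h)
{
    if (is_cursor) {
        *w = crtc_w;
        *h = crtc_h;
    } else {
        *w = src_w_fp >> 16; /* drop the fractional part */
        *h = src_h_fp >> 16;
    }
}

int main(void)
{
    unsigned int w, h;

    wm_plane_size(false, 1920u << 16, 1080u << 16, 0, 0, &w, &h);
    printf("plane:  %ux%u\n", w, h);
    wm_plane_size(true, 0, 0, 256, 256, &w, &h);
    printf("cursor: %ux%u\n", w, h);
    return 0;
}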
v2: Resolve conflicts from SKL+ wm rework Cc: stable@vger.kernel.org Cc: Tvrtko Ursulin Fixes: b63a16f6cd89 ("drm/i915: Compute display surface offset in the plane check hook for SKL+") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170331180056.14086-3-ville.syrjala@linux.intel.com Tested-by: Tvrtko Ursulin Reviewed-by: Maarten Lankhorst (cherry picked from commit fce5adf568abb1e8264d677156e2e0deb529194d) Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20170608144002.1605-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_pm.c | 36 ++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2ca481b5aa69..078fd1bfa5ea 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, /* n.b., src is 16.16 fixed point, dst is whole integer */ if (plane->id == PLANE_CURSOR) { + /* + * Cursors only support 0/180 degree rotation, + * hence no need to account for rotation here. + */ src_w = pstate->base.src_w; src_h = pstate->base.src_h; dst_w = pstate->base.crtc_w; dst_h = pstate->base.crtc_h; } else { + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ src_w = drm_rect_width(&pstate->base.src); src_h = drm_rect_height(&pstate->base.src); dst_w = drm_rect_width(&pstate->base.dst); dst_h = drm_rect_height(&pstate->base.dst); } - if (drm_rotation_90_or_270(pstate->base.rotation)) - swap(dst_w, dst_h); - downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); @@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, if (y && format != DRM_FORMAT_NV12) return 0; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ width = drm_rect_width(&intel_pstate->base.src) >> 16; height = drm_rect_height(&intel_pstate->base.src) >> 16; - if (drm_rotation_90_or_270(pstate->rotation)) - swap(width, height); - /* for planar format */ if (format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ @@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, fb->modifier != I915_FORMAT_MOD_Yf_TILED) return 8; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ src_w = drm_rect_width(&intel_pstate->base.src) >> 16; src_h = drm_rect_height(&intel_pstate->base.src) >> 16; - if (drm_rotation_90_or_270(pstate->rotation)) - swap(src_w, src_h); - /* Halve UV plane width and height for NV12 */ if (fb->format->format == DRM_FORMAT_NV12 && !y) { src_w /= 2; @@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, width = intel_pstate->base.crtc_w; height = intel_pstate->base.crtc_h; } else { + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */ width = drm_rect_width(&intel_pstate->base.src) >> 16; height = drm_rect_height(&intel_pstate->base.src) >> 16; } - if (drm_rotation_90_or_270(pstate->rotation)) - swap(width, height); - cpp = fb->format->cpp[0]; plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); From c380f681245d7ae57f17d9ebbbe8f8f1557ee1fb Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 9 Jun 2017 15:48:05 +0800 Subject: [PATCH 184/436] drm/i915: Fix GVT-g PVINFO version compatibility check Currently the GVT-g i915 guest strictly checks that the PVINFO version matches 1.0, which doesn't help compatibility at all and prevents the GVT-g host from easily extending PVINFO with a version bump for real compatibility checks. This fixes that to check minimal required PVINFO version instead. v2: - drop unneeded version macro - use only major version for sanity check v3: - fix up PVInfo value with kernel type - one indent fix Reviewed-by: Joonas Lahtinen Cc: Chuanxiao Dong Cc: Joonas Lahtinen Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Zhenyu Wang Signed-off-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170609074805.5101-1-zhenyuw@linux.intel.com (cherry picked from commit 0c8792d00d38de85b6ceb1dd67d3ee009d7c8e42) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_pvinfo.h | 8 ++------ drivers/gpu/drm/i915/i915_vgpu.c | 10 ++++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index c0cb2974caac..2cfe96d3e5d1 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -36,10 +36,6 @@ #define VGT_VERSION_MAJOR 1 #define VGT_VERSION_MINOR 0 -#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) -#define INTEL_VGT_IF_VERSION \ - INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) - /* * notifications from guest to vgpu device model */ @@ -55,8 +51,8 @@ enum vgt_g2v_type { struct vgt_if { u64 magic; /* VGT_MAGIC */ - uint16_t version_major; - uint16_t version_minor; + u16 version_major; + u16 version_minor; u32 vgt_id; /* ID of vGT instance */ u32 rsv1[12]; /* pad to offset 0x40 */ /* diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 4ab8a973b61f..2e739018fb4c 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -60,8 +60,8 @@ */ void i915_check_vgpu(struct drm_i915_private *dev_priv) { - uint64_t magic; - uint32_t version; + u64 magic; + u16 version_major; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); @@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) if (magic != VGT_MAGIC) return; - version = INTEL_VGT_IF_VERSION_ENCODE( - __raw_i915_read16(dev_priv, vgtif_reg(version_major)), - __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); - if (version != INTEL_VGT_IF_VERSION) { + version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); + if (version_major < VGT_VERSION_MAJOR) { DRM_INFO("VGT interface version mismatch!\n"); return; } From 769dc04db3ed8484798aceb015b94deacc2ba557 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 8 Jun 2017 14:00:49 +0300 Subject: [PATCH 185/436] mac80211: don't look at the PM bit of BAR frames When a peer sends a BAR frame with PM bit clear, we should not modify its PM state as mandated by the spec in IEEE 802.11-2012 10.2.1.2.
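A minimal user-space sketch of the guard this patch adds (hypothetical types; only the rule that a BAR frame's reserved PM field must not change the peer's power-save state comes from the patch):

#include <stdbool.h>
#include <stdio.h>

struct rx_frame {
    bool is_back_req; /* BlockAckReq (BAR) control frame */
    bool pm_bit;      /* Power Management field */
};

/* Only frames whose PM field is meaningful may change the peer's PS state;
 * for BAR frames the field is reserved (IEEE 802.11-2012 10.2.1.2). */
static void update_peer_ps(const struct rx_frame *f, bool *peer_in_ps)
{
    if (f->is_back_req)
        return;
    *peer_in_ps = f->pm_bit;
}

int main(void)
{
    bool peer_in_ps = true;
    struct rx_frame bar = { .is_back_req = true, .pm_bit = false };

    update_peer_ps(&bar, &peer_in_ps);
    printf("peer still in PS after BAR: %d\n", peer_in_ps); /* 1 */
    return 0;
}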
Cc: stable@vger.kernel.org Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1f75280ba26c..3674fe3d67dc 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) */ if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && !ieee80211_has_morefrags(hdr->frame_control) && + !ieee80211_is_back_req(hdr->frame_control) && !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && - /* PM bit is only checked in frames where it isn't reserved, + /* + * PM bit is only checked in frames where it isn't reserved, * in AP mode it's reserved in non-bufferable management frames * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) + * BAR frames should be ignored as specified in + * IEEE 802.11-2012 10.2.1.2. */ (!ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { From 204a7dbcb27bc4b461f42d7f96fdc875eb677f2f Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Mon, 12 Jun 2017 10:44:58 +0300 Subject: [PATCH 186/436] mac80211: Fix incorrect condition when checking rx timestamp If the driver reports the rx timestamp at PLCP start, mac80211 can only handle legacy encoding, but the code checks that the encoding is not legacy. Fix this. Fixes: da6a4352e7c8 ("mac80211: separate encoding/bandwidth from flags") Signed-off-by: Avraham Stern Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 665501ac358f..5e002f62c235 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1531,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) return true; /* can't handle non-legacy preamble yet */ if (status->flag & RX_FLAG_MACTIME_PLCP_START && - status->encoding != RX_ENC_LEGACY) + status->encoding == RX_ENC_LEGACY) return true; return false; } From 44f6d42cbd6e4c1d4d25f19502dd5f27aedf89d4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 10 Jun 2017 13:52:44 +0300 Subject: [PATCH 187/436] mac80211: remove 5/10 MHz rate code from station MLME There's no need for the station MLME code to handle bitrates for 5 or 10 MHz channels when it can't ever create such a configuration. Remove the unnecessary code. 
Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0ea9712bd99e..2f46db7d3afc 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; - u32 rate_flags, rates = 0; + u32 rates = 0; sdata_assert_lock(sdata); @@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) return; } chan = chanctx_conf->def.chan; - rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); rcu_read_unlock(); sband = local->hw.wiphy->bands[chan->band]; shift = ieee80211_vif_get_shift(&sdata->vif); @@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) */ rates_len = 0; for (i = 0; i < sband->n_bitrates; i++) { - if ((rate_flags & sband->bitrates[i].flags) - != rate_flags) - continue; rates |= BIT(i); rates_len++; } @@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, u32 *rates, u32 *basic_rates, bool *have_higher_than_11mbit, int *min_rate, int *min_rate_index, - int shift, u32 rate_flags) + int shift) { int i, j; @@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, int brate; br = &sband->bitrates[j]; - if ((rate_flags & br->flags) != rate_flags) - continue; brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); if (brate == rate) { @@ -4411,27 +4405,15 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; - struct ieee80211_chanctx_conf *chanctx_conf; const struct cfg80211_bss_ies *ies; int shift = ieee80211_vif_get_shift(&sdata->vif); - u32 rate_flags; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (WARN_ON(!chanctx_conf)) { - rcu_read_unlock(); - sta_info_free(local, new_sta); - return -EINVAL; - } - rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); - rcu_read_unlock(); ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, &rates, &basic_rates, &have_higher_than_11mbit, &min_rate, &min_rate_index, - shift, rate_flags); + shift); /* * This used to be a workaround for basic rates missing From c87905bec5dad66aa6bb43d11502cafdb33e07db Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 10 Jun 2017 13:52:43 +0300 Subject: [PATCH 188/436] mac80211: set bss_info data before configuring the channel When mac80211 changes the channel, it also calls into the driver's bss_info_changed() callback, e.g. with BSS_CHANGED_IDLE. The driver may, like iwlwifi does, access more data from bss_info in that case and iwlwifi accesses the basic_rates bitmap, but if changing from a band with more (basic) rates to one with fewer, an out-of-bounds access of the rate array may result. While we can't avoid having invalid data at some point in time, we can avoid having it while we call the driver - so set up all the data before configuring the channel, and then apply it afterwards. 
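The ordering argument can be sketched in plain C (all names hypothetical): build the new configuration completely, publish it, and only then perform the channel change that may call back into the driver.

#include <stdio.h>

struct bss_conf {
    unsigned int basic_rates;
    int beacon_int;
};

static struct bss_conf live; /* what driver callbacks read */

/* a driver callback that may index rate tables via live.basic_rates */
static void driver_bss_info_changed(unsigned int changed)
{
    printf("changed=%#x basic_rates=%#x beacon_int=%d\n",
           changed, live.basic_rates, live.beacon_int);
}

static void set_channel(void)
{
    /* changing the channel fires driver callbacks as a side effect */
    driver_bss_info_changed(0);
}

int main(void)
{
    struct bss_conf staged = { .basic_rates = 0x00f, .beacon_int = 100 };

    live = staged;                      /* 1) publish the complete config   */
    set_channel();                      /* 2) only then change the channel  */
    driver_bss_info_changed(0x1 | 0x2); /* 3) and notify the rest explicitly */
    return 0;
}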
This fixes https://bugzilla.kernel.org/show_bug.cgi?id=195677 Reported-by: Johannes Hirte Tested-by: Johannes Hirte Debugged-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2f46db7d3afc..cc8e6ea1b27e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4392,15 +4392,19 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, return -ENOMEM; } - if (new_sta || override) { - err = ieee80211_prep_channel(sdata, cbss); - if (err) { - if (new_sta) - sta_info_free(local, new_sta); - return -EINVAL; - } - } - + /* + * Set up the information for the new channel before setting the + * new channel. We can't - completely race-free - change the basic + * rates bitmap and the channel (sband) that it refers to, but if + * we set it up before we at least avoid calling into the driver's + * bss_info_changed() method with invalid information (since we do + * call that from changing the channel - only for IDLE and perhaps + * some others, but ...). + * + * So to avoid that, just set up all the new information before the + * channel, but tell the driver to apply it only afterwards, since + * it might need the new channel for that. + */ if (new_sta) { u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; @@ -4471,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.sync_dtim_count = 0; } rcu_read_unlock(); + } - /* tell driver about BSSID, basic rates and timing */ + if (new_sta || override) { + err = ieee80211_prep_channel(sdata, cbss); + if (err) { + if (new_sta) + sta_info_free(local, new_sta); + return -EINVAL; + } + } + + if (new_sta) { + /* + * tell driver about BSSID, basic rates and timing + * this was set up above, before setting the channel + */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); From 98c67d187db7808b1f3c95f2110dd4392d034182 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sat, 10 Jun 2017 04:59:12 +0200 Subject: [PATCH 189/436] mac80211/wpa: use constant time memory comparison for MACs Otherwise, we enable all sorts of forgeries via timing attack. Signed-off-by: Jason A. 
Donenfeld Cc: Johannes Berg Cc: linux-wireless@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Johannes Berg --- net/mac80211/wpa.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index c1ef22df865f..cc19614ff4e6 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "ieee80211_i.h" #include "michael.h" @@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); - if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) + if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) goto mic_fail; /* remove Michael MIC from payload */ @@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); - if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } @@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) bip_aad(skb, aad); ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); - if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } @@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || - memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; return RX_DROP_UNUSABLE; } From b3dd8279659f14f3624bb32559782d699fa6f7d1 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 10 Jun 2017 13:52:45 +0300 Subject: [PATCH 190/436] mac80211: don't send SMPS action frame in AP mode when not needed mac80211 allows to modify the SMPS state of an AP both, when it is started, and after it has been started. Such a change will trigger an action frame to all the peers that are currently connected, and will be remembered so that new peers will get notified as soon as they connect (since the SMPS setting in the beacon may not be the right one). This means that we need to remember the SMPS state currently requested as well as the SMPS state that was configured initially (and advertised in the beacon). The former is bss->req_smps and the latter is sdata->smps_mode. Initially, the AP interface could only be started with SMPS_OFF, which means that sdata->smps_mode was SMPS_OFF always. Later, a nl80211 API was added to be able to start an AP with a different AP mode. That code forgot to update bss->req_smps and because of that, if the AP interface was started with SMPS_DYNAMIC, we had: sdata->smps_mode = SMPS_DYNAMIC bss->req_smps = SMPS_OFF That configuration made mac80211 think it needs to fire off an action frame to any new station connecting to the AP in order to let it know that the actual SMPS configuration is SMPS_OFF. Fix that by properly setting bss->req_smps in ieee80211_start_ap. 
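The underlying bug pattern is two shadow copies of the same state diverging at initialization. A minimal sketch of the invariant the fix restores (made-up names, not the mac80211 structures):

    #include <stdio.h>

    enum smps_mode { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

    struct ap_state {
        enum smps_mode advertised;  /* what the beacon announced at start */
        enum smps_mode requested;   /* what is currently requested */
    };

    static void start_ap(struct ap_state *ap, enum smps_mode initial)
    {
        ap->advertised = initial;
        ap->requested = initial;    /* keep both copies in sync from the start */
    }

    /* an action frame is only needed when the requested state really differs
     * from what the beacon advertises */
    static int needs_update_frame(const struct ap_state *ap)
    {
        return ap->requested != ap->advertised;
    }

    int main(void)
    {
        struct ap_state ap;

        start_ap(&ap, SMPS_DYNAMIC);
        printf("update frame needed: %d\n", needs_update_frame(&ap));
        return 0;
    }

If start_ap() only set one of the two fields, needs_update_frame() would report a spurious change for every new peer, which is exactly the behaviour described above.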
Fixes: f69931748730 ("mac80211: set smps_mode according to ap params") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 6c2e6060cd54..4a388fe8c2d1 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, default: return -EINVAL; } + sdata->u.ap.req_smps = sdata->smps_mode; + sdata->needed_rx_chains = sdata->local->rx_chains; sdata->vif.bss_conf.beacon_int = params->beacon_interval; From 0ca4cd7bccf0b82d2c10069f295772bb7b76d006 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 9 Jun 2017 13:15:37 +0200 Subject: [PATCH 191/436] HID: let generic driver yield control iff specific driver has been enabled There are many situations where the generic HID driver provides some basic level of support for a certain device, but later this support is extended (usually by implementing vendor-specific extensions of the HID protocol) and moved over to a separate (usually per-vendor) specific driver. This might bring a rather unpleasant surprise for users, as all of a sudden there is a new config option they have to enable in order to get any support for their device whatsoever, although previous kernel versions provided basic support through the generic driver. This is rightfully seen as a regression. Fix this by including the entry for a particular device in hid_have_special_driver[] iff the specific config option has been specified, and letting the generic driver handle the device otherwise. Also make the behavior of hid_scan_report() (where the same decision is being taken on a per-report level) consistent. While at it, reshuffle the hid_have_special_driver[] a bit to restore the alphabetical ordering (first order by config option, and within those sections order by VID). This is considered a short-term solution until a generic way of giving precedence to special drivers and falling back to the generic driver is figured out. While at it, fix up a missing entry for the GFRM driver; thanks to Hans de Goede for spotting this (and for discovering a few issues in the conversion).
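The per-option #if IS_ENABLED(CONFIG_...) gating used below works for both built-in (=y) and modular (=m) options because IS_ENABLED() folds either case to 1 at preprocessing time. A standalone illustration of that trick, with the macro chain written out along the lines of include/linux/kconfig.h and a made-up CONFIG_DEMO_DRIVER option:

    #include <stdio.h>

    /* essentially the definitions from include/linux/kconfig.h */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x)              ___is_defined(x)
    #define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_BUILTIN(option)  __is_defined(option)
    #define IS_MODULE(option)   __is_defined(option##_MODULE)
    #define IS_ENABLED(option)  (IS_BUILTIN(option) || IS_MODULE(option))

    /* pretend this made-up option was set to =y in .config */
    #define CONFIG_DEMO_DRIVER 1

    int main(void)
    {
        printf("built-in: %d, module: %d, enabled: %d\n",
               IS_BUILTIN(CONFIG_DEMO_DRIVER),
               IS_MODULE(CONFIG_DEMO_DRIVER),
               IS_ENABLED(CONFIG_DEMO_DRIVER));
        return 0;
    }

With the #define removed (option not set at all), all three values become 0, which is why a table entry wrapped in #if IS_ENABLED(CONFIG_FOO) disappears exactly when no specific driver could claim the device and hid-generic should keep it.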
Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 282 ++++++++++++++++++++++++++++++++--------- 1 file changed, 221 insertions(+), 61 deletions(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 04cee65531d7..6e040692f1d8 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid) * hid-rmi should take care of them, * not hid-generic */ - if (IS_ENABLED(CONFIG_HID_RMI)) - hid->group = HID_GROUP_RMI; + hid->group = HID_GROUP_RMI; break; } + /* fall back to generic driver in case specific driver doesn't exist */ + switch (hid->group) { + case HID_GROUP_MULTITOUCH_WIN_8: + /* fall-through */ + case HID_GROUP_MULTITOUCH: + if (!IS_ENABLED(CONFIG_HID_MULTITOUCH)) + hid->group = HID_GROUP_GENERIC; + break; + case HID_GROUP_SENSOR_HUB: + if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB)) + hid->group = HID_GROUP_GENERIC; + break; + case HID_GROUP_RMI: + if (!IS_ENABLED(CONFIG_HID_RMI)) + hid->group = HID_GROUP_GENERIC; + break; + case HID_GROUP_WACOM: + if (!IS_ENABLED(CONFIG_HID_WACOM)) + hid->group = HID_GROUP_GENERIC; + break; + case HID_GROUP_LOGITECH_DJ_DEVICE: + if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ)) + hid->group = HID_GROUP_GENERIC; + break; + } vfree(parser); return 0; } @@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect); * used as a driver. See hid_scan_report(). */ static const struct hid_device_id hid_have_special_driver[] = { +#if IS_ENABLED(CONFIG_HID_A4TECH) { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, +#endif +#if IS_ENABLED(CONFIG_HID_ACCUTOUCH) + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, +#endif +#if IS_ENABLED(CONFIG_HID_ACRUX) { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, +#endif +#if IS_ENABLED(CONFIG_HID_ALPS) { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, +#endif +#if IS_ENABLED(CONFIG_HID_APPLE) { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, @@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) 
}, @@ -1851,62 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, +#endif +#if IS_ENABLED(CONFIG_HID_APPLEIR) + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, +#endif +#if IS_ENABLED(CONFIG_HID_ASUS) { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, +#endif +#if IS_ENABLED(CONFIG_HID_AUREAL) { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, +#endif +#if IS_ENABLED(CONFIG_HID_BELKIN) { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, +#endif +#if IS_ENABLED(CONFIG_HID_BETOP_FF) { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, - { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, +#endif +#if IS_ENABLED(CONFIG_HID_CHERRY) { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, +#endif +#if IS_ENABLED(CONFIG_HID_CHICONY) { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, +#endif +#if IS_ENABLED(CONFIG_HID_CMEDIA) + { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, +#endif +#if IS_ENABLED(CONFIG_HID_CORSAIR) { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, - { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, +#endif +#if IS_ENABLED(CONFIG_HID_CP2112) { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, +#endif +#if IS_ENABLED(CONFIG_HID_CYPRESS) { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, - { 
HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, +#endif +#if IS_ENABLED(CONFIG_HID_DRAGONRISE) { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, -#if IS_ENABLED(CONFIG_HID_MAYFLASH) - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, #endif - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, +#if IS_ENABLED(CONFIG_HID_ELECOM) { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, +#endif +#if IS_ENABLED(CONFIG_HID_ELO) { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, - { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, +#endif +#if IS_ENABLED(CONFIG_HID_EMS_FF) { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, +#endif +#if IS_ENABLED(CONFIG_HID_EZKEY) { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, +#endif +#if IS_ENABLED(CONFIG_HID_GEMBIRD) { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, +#endif +#if IS_ENABLED(CONFIG_HID_GFRM) + { HID_BLUETOOTH_DEVICE(0x58, 0x2000) }, + { HID_BLUETOOTH_DEVICE(0x471, 0x2210) }, +#endif +#if IS_ENABLED(CONFIG_HID_GREENASIA) { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, +#endif +#if IS_ENABLED(CONFIG_HID_GT683R) + { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, +#endif +#if IS_ENABLED(CONFIG_HID_GYRATION) { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, +#endif +#if IS_ENABLED(CONFIG_HID_HOLTEK) { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, @@ -1915,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, - { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, - { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, +#endif +#if IS_ENABLED(CONFIG_HID_ICADE) { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, +#endif +#if IS_ENABLED(CONFIG_HID_KENSINGTON) { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, +#endif +#if 
IS_ENABLED(CONFIG_HID_KEYTOUCH) { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, +#endif +#if IS_ENABLED(CONFIG_HID_KYE) { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, @@ -1930,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, +#endif +#if IS_ENABLED(CONFIG_HID_LCPOWER) { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, +#endif +#if IS_ENABLED(CONFIG_HID_LED) + { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, +#endif #if IS_ENABLED(CONFIG_HID_LENOVO) { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, #endif - { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, +#if IS_ENABLED(CONFIG_HID_LOGITECH) { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, @@ -1957,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, @@ -1969,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, -#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, -#endif { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, +#endif +#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP) + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, +#endif +#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, +#endif +#if IS_ENABLED(CONFIG_HID_MAGICMOUSE) + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, +#endif +#if IS_ENABLED(CONFIG_HID_MAYFLASH) + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, +#endif +#if IS_ENABLED(CONFIG_HID_MICROSOFT) { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, @@ -1995,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, +#endif +#if IS_ENABLED(CONFIG_HID_MONTEREY) { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, +#endif +#if IS_ENABLED(CONFIG_HID_MULTITOUCH) + { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, +#endif +#if IS_ENABLED(CONFIG_HID_WIIMOTE) + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, +#endif +#if IS_ENABLED(CONFIG_HID_NTI) { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, +#endif +#if IS_ENABLED(CONFIG_HID_NTRIG) { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, @@ -2017,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, +#endif +#if IS_ENABLED(CONFIG_HID_ORTEK) { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, +#endif +#if IS_ENABLED(CONFIG_HID_PANTHERLORD) + { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, + { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, +#endif +#if IS_ENABLED(CONFIG_HID_PENMOUNT) { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, +#endif +#if IS_ENABLED(CONFIG_HID_PETALYNX) { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, +#endif +#if IS_ENABLED(CONFIG_HID_PICOLCD) + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, +#endif +#if IS_ENABLED(CONFIG_HID_PLANTRONICS) { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, +#endif +#if IS_ENABLED(CONFIG_HID_PRIMAX) { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, +#endif +#if IS_ENABLED(CONFIG_HID_PRODIKEYS) + { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, +#endif +#if IS_ENABLED(CONFIG_HID_RMI) + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, +#endif #if IS_ENABLED(CONFIG_HID_ROCCAT) { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, @@ -2051,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, #endif +#if IS_ENABLED(CONFIG_HID_SAMSUNG) { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, +#endif +#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS) + { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, +#endif +#if IS_ENABLED(CONFIG_HID_SONY) + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, @@ -2072,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = { { 
HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, +#endif +#if IS_ENABLED(CONFIG_HID_SPEEDLINK) + { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, +#endif +#if IS_ENABLED(CONFIG_HID_STEELSERIES) { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, +#endif +#if IS_ENABLED(CONFIG_HID_SUNPLUS) { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, - { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, +#endif +#if IS_ENABLED(CONFIG_HID_THRUSTMASTER) { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, @@ -2083,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, +#endif +#if IS_ENABLED(CONFIG_HID_TIVO) { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, +#endif +#if IS_ENABLED(CONFIG_HID_TOPSEED) + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, +#endif +#if IS_ENABLED(CONFIG_HID_TWINHAN) { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, +#endif +#if IS_ENABLED(CONFIG_HID_UCLOGIC) + { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, @@ -2096,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, - { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, - { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, - { 
HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, +#endif +#if IS_ENABLED(CONFIG_HID_UDRAW_PS3) + { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, +#endif +#if IS_ENABLED(CONFIG_HID_WALTOP) { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, @@ -2117,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, - { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, +#endif +#if IS_ENABLED(CONFIG_HID_XINMO) { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, +#endif +#if IS_ENABLED(CONFIG_HID_ZEROPLUS) { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, +#endif +#if IS_ENABLED(CONFIG_HID_ZYDACRON) { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, - - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, - { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, - { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, +#endif { } }; From 97d8b6e3b8538198aefb0003342920a82e062147 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Tue, 13 Jun 2017 16:54:55 +0530 Subject: [PATCH 192/436] net: rps: fix uninitialized symbol warning This patch fixes uninitialized symbol warning that got introduced by the following commit 773fc8f6e8d6 ("net: rps: send out pending IPI's on CPU hotplug") Signed-off-by: Ashwanth Goli Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 54bb8d99d26a..6d60149287a1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8203,7 +8203,7 @@ static int dev_cpu_dead(unsigned int oldcpu) struct sk_buff **list_skb; struct sk_buff *skb; unsigned int cpu; - struct softnet_data *sd, *oldsd, *remsd; + struct softnet_data *sd, *oldsd, *remsd = NULL; local_irq_disable(); cpu = smp_processor_id(); From 459fa246d8fa4a543ed9a3331f15c8fe1caf9937 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 11 Jun 2017 15:22:10 +1000 Subject: [PATCH 193/436] clocksource: Explicitly include linux/clocksource.h when needed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The kbuild test robot reported errors in these files when doing an ia64 allmodconfig build. 
drivers/clocksource/timer-sun5i.c:52:21: error: field 'clksrc' has incomplete type struct clocksource clksrc; ^~~~~~ drivers/clocksource/cadence_ttc_timer.c:92:21: error: field 'cs' has incomplete type struct clocksource cs; ^~ (and many more errors for these files) Cc: Michal Simek Cc: "Sören Brinkmann" Cc: Daniel Lezcano Cc: Thomas Gleixner Cc: Maxime Ripard Cc: Chen-Yu Tsai Reported-by: kbuild test robot Signed-off-by: Stephen Rothwell Acked-by: Michal Simek Signed-off-by: Daniel Lezcano --- drivers/clocksource/cadence_ttc_timer.c | 1 + drivers/clocksource/timer-sun5i.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 44e5e951583b..8e64b8460f11 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2e9c830ae1cd..c4656c4d44a6 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include From eb3c28c15555212227cfa8b9a3baa21ad5982a19 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Mon, 12 Jun 2017 13:56:51 +0200 Subject: [PATCH 194/436] r8152: give the device version Getting the device version out of the driver really aids debugging. Signed-off-by: Oliver Neukum Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ddc62cb69be8..1a419a45e2a2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf) break; } + dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); + return version; } From c38b7d327aafd1e3ad7ff53eefac990673b65667 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 12 Jun 2017 09:52:26 -0700 Subject: [PATCH 195/436] igmp: acquire pmc lock for ip_mc_clear_src() Andrey reported a use-after-free in add_grec(): for (psf = *psf_list; psf; psf = psf_next) { ... psf_next = psf->sf_next; where the struct ip_sf_list's were already freed by: kfree+0xe8/0x2b0 mm/slub.c:3882 ip_mc_clear_src+0x69/0x1c0 net/ipv4/igmp.c:2078 ip_mc_dec_group+0x19a/0x470 net/ipv4/igmp.c:1618 ip_mc_drop_socket+0x145/0x230 net/ipv4/igmp.c:2609 inet_release+0x4e/0x1c0 net/ipv4/af_inet.c:411 sock_release+0x8d/0x1e0 net/socket.c:597 sock_close+0x16/0x20 net/socket.c:1072 This happens because we don't hold pmc->lock in ip_mc_clear_src() and a parallel mr_ifc_timer timer could jump in and access them. The RCU lock is there but it is merely for pmc itself, this spinlock could actually ensure we don't access them in parallel. Thanks to Eric and Long for discussion on this bug. Reported-by: Andrey Konovalov Cc: Eric Dumazet Cc: Xin Long Signed-off-by: Cong Wang Reviewed-by: Xin Long Signed-off-by: David S. 
Miller --- net/ipv4/igmp.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 44fd86de2823..8f6b5bbcbf69 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2071,21 +2071,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, static void ip_mc_clear_src(struct ip_mc_list *pmc) { - struct ip_sf_list *psf, *nextpsf; + struct ip_sf_list *psf, *nextpsf, *tomb, *sources; - for (psf = pmc->tomb; psf; psf = nextpsf) { - nextpsf = psf->sf_next; - kfree(psf); - } + spin_lock_bh(&pmc->lock); + tomb = pmc->tomb; pmc->tomb = NULL; - for (psf = pmc->sources; psf; psf = nextpsf) { - nextpsf = psf->sf_next; - kfree(psf); - } + sources = pmc->sources; pmc->sources = NULL; pmc->sfmode = MCAST_EXCLUDE; pmc->sfcount[MCAST_INCLUDE] = 0; pmc->sfcount[MCAST_EXCLUDE] = 1; + spin_unlock_bh(&pmc->lock); + + for (psf = tomb; psf; psf = nextpsf) { + nextpsf = psf->sf_next; + kfree(psf); + } + for (psf = sources; psf; psf = nextpsf) { + nextpsf = psf->sf_next; + kfree(psf); + } } /* Join a multicast group From 4a6a97e2650c7dd7b1380ba763e9038c814e220c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 12 Jun 2017 16:39:51 -0700 Subject: [PATCH 196/436] netconsole: Remove duplicate "netconsole: " logging prefix It's already added by pr_fmt so remove the explicit use. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/netconsole.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 06ee6395117f..0e27920c2b6b 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item, if (err) goto out_unlock; - pr_info("netconsole: network logging started\n"); + pr_info("network logging started\n"); } else { /* false */ /* We need to disable the netconsole before cleaning it up * otherwise we might end up in write_msg() with From 665fff2923323e348728e03a7cdb0ce56f316d39 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 12 Jun 2017 17:18:51 -0700 Subject: [PATCH 197/436] net: phy: Fix MDIO_THUNDER dependencies After commit 90eff9096c01 ("net: phy: Allow splitting MDIO bus/device support from PHYs") we could create a configuration where MDIO_DEVICE=y and PHYLIB=m which leads to the following undefined references: drivers/built-in.o: In function `thunder_mdiobus_pci_remove': >> mdio-thunder.c:(.text+0x2a212f): undefined reference to >> `mdiobus_unregister' >> mdio-thunder.c:(.text+0x2a2138): undefined reference to >> `mdiobus_free' drivers/built-in.o: In function `thunder_mdiobus_pci_probe': mdio-thunder.c:(.text+0x2a22e7): undefined reference to `devm_mdiobus_alloc_size' mdio-thunder.c:(.text+0x2a236f): undefined reference to `of_mdiobus_register' Reported-by: kbuild test robot Fixes: 90eff9096c01 ("net: phy: Allow splitting MDIO bus/device support from PHYs") Signed-off-by: Florian Fainelli Tested-by: Randy Dunlap Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c360dd6ead22..3ab6c58d4be6 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -127,6 +127,7 @@ config MDIO_THUNDER tristate "ThunderX SOCs MDIO buses" depends on 64BIT depends on PCI + depends on !(MDIO_DEVICE=y && PHYLIB=m) select MDIO_CAVIUM help This driver supports the MDIO interfaces found on Cavium From 60cfe1eaccb8af598ebe1bdc44e157ea30fcdd81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Tue, 13 Jun 2017 19:10:18 +0200 Subject: [PATCH 198/436] qmi_wwan: new Telewell and Sierra device IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A new Sierra Wireless EM7305 device ID used in a Toshiba laptop, and two Longcheer device IDs entries used by Telewell TW-3G HSPA+ branded modems. Reported-by: Petr Kloc Reported-by: Teemu Likonen Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 949671ce4039..32a22f4e8356 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1192,6 +1192,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ + {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ @@ -1206,6 +1208,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ From ace17c369295f088dc8ac8ff468602646fa5cced Mon Sep 17 00:00:00 2001 From: "Tayar, Tomer" Date: Tue, 13 Jun 2017 12:15:59 +0300 Subject: [PATCH 199/436] qed: fix dump of context data Currently when dumping a context data only word number '1' is read for the entire context. Fixes: c965db444629 ("qed: Add support for debug data collection") Signed-off-by: Tomer Tayar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 483241b4b05d..a672f6a860dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, s_storm_defs[storm_id].cm_ctx_wr_addr, - BIT(9) | lid); + (i << 9) | lid); *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, rd_reg_addr); From bf5d008164dd84d671ca2dd569a1676051f9faff Mon Sep 17 00:00:00 2001 From: Priyalee Kushwaha Date: Sat, 3 Jun 2017 10:21:24 -0700 Subject: [PATCH 200/436] platform/x86: intel_telemetry_debugfs: fix oops when load/unload module This fixes an oops found while testing load/unload of the intel_telemetry_debugfs module. module_init uses register_pm_notifier for PM callbacks, but unregister_pm_notifier was missing from module_exit. [ 97.481860] BUG: unable to handle kernel paging request at ffffffffa006f010 [ 97.489742] IP: blocking_notifier_chain_register+0x3a/0xa0 [ 97.495898] PGD 2e0a067 [ 97.495899] PUD 2e0b063 [ 97.498737] PMD 179e29067 [ 97.501573] PTE 0 [ 97.508423] Oops: 0000 1 PREEMPT SMP [ 97.512724] Modules linked in: intel_telemetry_debugfs intel_rapl gpio_keys dwc3 udc_core intel_telemetry_pltdrv intel_punit_ipc intel_telemetry_core rtc_cmos efivars x86_pkg_temp_thermal iwlwifi snd_hda_codec_hdmi soc_button_array btusb cfg80211 btrtl mei_me hci_uart btbcm mei btintel i915 bluetooth intel_pmc_ipc snd_hda_intel spi_pxa2xx_platform snd_hda_codec dwc3_pci snd_hda_core tpm_tis tpm_tis_core tpm efivarfs [ 97.558453] CPU: 0 PID: 889 Comm: modprobe Not tainted 4.11.0-rc6-intel-dev-bkc #1 [ 97.566950] Hardware name: Intel Corp. Joule DVT3/SDS, BIOS GTPP181A.X64.0143.B30.1701132137 01/13/2017 [ 97.577518] task: ffff8801793a21c0 task.stack: ffff8801793f0000 [ 97.584162] RIP: 0010:blocking_notifier_chain_register+0x3a/0xa0 [ 97.590903] RSP: 0018:ffff8801793f3c58 EFLAGS: 00010286 [ 97.596802] RAX: ffffffffa006f000 RBX: ffffffff81e3ea20 RCX: 0000000000000000 [ 97.604812] RDX: ffff880179eaf210 RSI: ffffffffa0131000 RDI: ffffffff81e3ea20 [ 97.612821] RBP: ffff8801793f3c68 R08: 0000000000000006 R09: 000000000000005c [ 97.620847] R10: 0000000000000000 R11: 0000000000000006 R12: ffffffffa0131000 [ 97.628855] R13: 0000000000000000 R14: ffff880176e35f48 R15: ffff8801793f3ea8 [ 97.636865] FS: 00007f7eeba07700(0000) GS:ffff88017fc00000(0000) knlGS:0000000000000000 [ 97.645948] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 97.652423] CR2: ffffffffa006f010 CR3: 00000001775ef000 CR4: 00000000003406f0 [ 97.660423] Call Trace: [ 97.663166] ? 
0xffffffffa0031000 [ 97.666885] register_pm_notifier+0x18/0x20 [ 97.671581] telemetry_debugfs_init+0x92/0x1000 Signed-off-by: Priyalee Kushwaha Signed-off-by: Andy Shevchenko Signed-off-by: Darren Hart (VMware) --- drivers/platform/x86/intel_telemetry_debugfs.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index ef29f18b1951..4cc2f4ea0a25 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -97,11 +97,9 @@ } \ } -#ifdef CONFIG_PM_SLEEP static u8 suspend_prep_ok; static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; static u64 suspend_shlw_res_temp, suspend_deep_res_temp; -#endif struct telemetry_susp_stats { u32 shlw_swake_ctr; @@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = { .release = single_release, }; -#ifdef CONFIG_PM_SLEEP static int pm_suspend_prep_cb(void) { struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; @@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this, static struct notifier_block pm_notifier = { .notifier_call = pm_notification, }; -#endif /* CONFIG_PM_SLEEP */ static int __init telemetry_debugfs_init(void) { @@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void) if (err < 0) return -EINVAL; - -#ifdef CONFIG_PM_SLEEP register_pm_notifier(&pm_notifier); -#endif /* CONFIG_PM_SLEEP */ debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); - if (!debugfs_conf->telemetry_dbg_dir) - return -ENOMEM; + if (!debugfs_conf->telemetry_dbg_dir) { + err = -ENOMEM; + goto out_pm; + } f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, debugfs_conf->telemetry_dbg_dir, NULL, @@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void) out: debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); debugfs_conf->telemetry_dbg_dir = NULL; +out_pm: + unregister_pm_notifier(&pm_notifier); return err; } @@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void) { debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); debugfs_conf->telemetry_dbg_dir = NULL; + unregister_pm_notifier(&pm_notifier); } late_initcall(telemetry_debugfs_init); From 4f02b50ece11dcf75263fb7a4cfe8a5df1cfabea Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Fri, 2 Jun 2017 14:17:05 +0200 Subject: [PATCH 201/436] leds: bcm6328: fix signal source assignment for leds 4 to 7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each nibble represents 4 LEDs, and in case of the higher register, bit 0 represents LED 4, so we need to use modulus for the LED number as well. 
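The arithmetic can be checked with a tiny standalone program (an illustration of the indexing rule only, not the driver code): when eight items are split across two registers, four per register, the bit position inside a register must be taken modulo 4, otherwise item 5 lands on bit 5 instead of bit 1 of its nibble.

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* Items 0-3 live in regs[0], items 4-7 in regs[1]; inside a register each
     * item occupies one bit of a nibble, so the bit index must be item % 4. */
    static void set_item(uint32_t regs[2], unsigned int item, unsigned int nibble_shift)
    {
        regs[item / 4] |= BIT(item % 4) << nibble_shift;
    }

    int main(void)
    {
        uint32_t regs[2] = { 0, 0 };

        set_item(regs, 1, 16);   /* sets bit 17 of regs[0] */
        set_item(regs, 5, 16);   /* sets bit 17 of regs[1], not bit 21 */
        printf("regs[0]=0x%08x regs[1]=0x%08x\n",
               (unsigned int)regs[0], (unsigned int)regs[1]);
        return 0;
    }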
Fixes: fd7b025a238d0a5440bfa26c585eb78097bf48dc ("leds: add BCM6328 LED driver") Signed-off-by: Jonas Gorski Acked-by: Álvaro Fernández Rojas Signed-off-by: Jacek Anaszewski --- drivers/leds/leds-bcm6328.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 1548259297c1..2cfd9389ee96 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, spin_lock_irqsave(lock, flags); val = bcm6328_led_read(addr); - val |= (BIT(reg) << (((sel % 4) * 4) + 16)); + val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16)); bcm6328_led_write(addr, val); spin_unlock_irqrestore(lock, flags); } @@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, spin_lock_irqsave(lock, flags); val = bcm6328_led_read(addr); - val |= (BIT(reg) << ((sel % 4) * 4)); + val |= (BIT(reg % 4) << ((sel % 4) * 4)); bcm6328_led_write(addr, val); spin_unlock_irqrestore(lock, flags); } From 436c4c45b5b9562b59cedbb51b7343ab4a6dd8cc Mon Sep 17 00:00:00 2001 From: Zhang Bo Date: Tue, 13 Jun 2017 10:39:20 +0800 Subject: [PATCH 202/436] Revert "leds: handle suspend/resume in heartbeat trigger" This reverts commit 5ab92a7cb82c66bf30685583a38a18538e3807db. The system cannot enter suspend mode because of the heartbeat LED trigger. In autosleep_wq, the try_to_suspend function periodically tries to enter suspend mode: it reads wakeup_count, then calls the pm_notifier chain callbacks and freezes processes. heartbeat_pm_notifier is called, and it calls led_trigger_unregister to change the trigger of the LED device to none. This sends a uevent and the wakeup source count changes. Because wakeup_count changed, the suspend attempt aborts.
Fixes: 5ab92a7cb82c ("leds: handle suspend/resume in heartbeat trigger") Signed-off-by: Zhang Bo Acked-by: Pavel Machek Reviewed-by: Linus Walleij Signed-off-by: Jacek Anaszewski --- drivers/leds/trigger/ledtrig-heartbeat.c | 31 ------------------------ 1 file changed, 31 deletions(-) diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index afa3b4099214..e95ea65380c8 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "../leds.h" static int panic_heartbeats; @@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = { .deactivate = heartbeat_trig_deactivate, }; -static int heartbeat_pm_notifier(struct notifier_block *nb, - unsigned long pm_event, void *unused) -{ - int rc; - - switch (pm_event) { - case PM_SUSPEND_PREPARE: - case PM_HIBERNATION_PREPARE: - case PM_RESTORE_PREPARE: - led_trigger_unregister(&heartbeat_led_trigger); - break; - case PM_POST_SUSPEND: - case PM_POST_HIBERNATION: - case PM_POST_RESTORE: - rc = led_trigger_register(&heartbeat_led_trigger); - if (rc) - pr_err("could not re-register heartbeat trigger\n"); - break; - default: - break; - } - return NOTIFY_DONE; -} - static int heartbeat_reboot_notifier(struct notifier_block *nb, unsigned long code, void *unused) { @@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, return NOTIFY_DONE; } -static struct notifier_block heartbeat_pm_nb = { - .notifier_call = heartbeat_pm_notifier, -}; - static struct notifier_block heartbeat_reboot_nb = { .notifier_call = heartbeat_reboot_notifier, }; @@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void) atomic_notifier_chain_register(&panic_notifier_list, &heartbeat_panic_nb); register_reboot_notifier(&heartbeat_reboot_nb); - register_pm_notifier(&heartbeat_pm_nb); } return rc; } static void __exit heartbeat_trig_exit(void) { - unregister_pm_notifier(&heartbeat_pm_nb); unregister_reboot_notifier(&heartbeat_reboot_nb); atomic_notifier_chain_unregister(&panic_notifier_list, &heartbeat_panic_nb); From 838519b89fa7e162f7190c70d888a65ee7889e12 Mon Sep 17 00:00:00 2001 From: Phil Reid Date: Tue, 13 Jun 2017 10:31:36 +0800 Subject: [PATCH 203/436] iio: buffer-dma: Add missing header buffer_impl.h Add buffer_impl.h as buffer.h was split into interface for using and for internals. Without this industrialio-buffer-dma.c fails to compile. Fixes: commit 33dd94cb972175249258329c4aaffddcc82c2005 ("iio:buffer.h - split into buffer.h and buffer_impl.h") Signed-off-by: Phil Reid Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/industrialio-buffer-dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index dd99d273bae9..ff03324dee13 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include From 7981dc07fe317e3506d706e5ef91b94da02656f4 Mon Sep 17 00:00:00 2001 From: Phil Reid Date: Tue, 13 Jun 2017 13:12:35 +0800 Subject: [PATCH 204/436] iio: buffer-dmaengine: Add missing header buffer_impl.h Add buffer_impl.h as buffer.h was split into interface for using and for internals. Without this industrialio-buffer-dmaengine.c fails to compile. 
Fixes: commit 33dd94cb972175249258329c4aaffddcc82c2005 ("iio:buffer.h - split into buffer.h and buffer_impl.h") Signed-off-by: Phil Reid Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 9fabed47053d..2b5a320f42c5 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -14,6 +14,7 @@ #include #include +#include #include #include From 46464411307746e6297a034a9983a22c9dfc5a0c Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:47 +0200 Subject: [PATCH 205/436] xen/blkback: fix disconnect while I/Os in flight MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Today disconnecting xen-blkback is broken in case there are still I/Os in flight: xen_blkif_disconnect() will bail out early without releasing all resources in the hope it will be called again when the last request has terminated. This, however, won't happen as xen_blkif_free() won't be called on termination of the last running request: xen_blkif_put() won't decrement the blkif refcnt to 0 as xen_blkif_disconnect() didn't finish before thus some xen_blkif_put() calls in xen_blkif_disconnect() didn't happen. To solve this deadlock xen_blkif_disconnect() and xen_blkif_alloc_rings() shouldn't use xen_blkif_put() and xen_blkif_get() but use some other way to do their accounting of resources. This at once fixes another error in xen_blkif_disconnect(): when it returned early with -EBUSY for another ring than 0 it would call xen_blkif_put() again for already handled rings on a subsequent call. This will lead to inconsistencies in the refcnt handling. Cc: stable@vger.kernel.org Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/common.h | 1 + drivers/block/xen-blkback/xenbus.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index dea61f6ab8cb..638597b17a38 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -281,6 +281,7 @@ struct xen_blkif_ring { wait_queue_head_t wq; atomic_t inflight; + bool active; /* One thread per blkif ring. 
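The per-ring accounting can be sketched in a few lines of self-contained C (a simplified illustration, not the xen-blkback code): each ring carries its own active flag, so a teardown pass that bails out with -EBUSY can safely be repeated later without touching rings it has already released.

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ring {
        bool active;     /* set at setup, cleared once torn down */
        int inflight;    /* outstanding I/O requests */
    };

    /* May be called repeatedly; returns -EBUSY while some ring still has I/O
     * in flight, 0 once everything has been released exactly once. */
    static int disconnect(struct ring *rings, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (!rings[i].active)
                continue;            /* handled on an earlier pass */
            if (rings[i].inflight)
                return -EBUSY;       /* caller retries later */
            /* release the per-ring resources here */
            rings[i].active = false;
        }
        return 0;
    }

    int main(void)
    {
        struct ring rings[2] = { { true, 0 }, { true, 1 } };

        printf("first pass:  %d\n", disconnect(rings, 2)); /* -EBUSY */
        rings[1].inflight = 0;                             /* last I/O done */
        printf("second pass: %d\n", disconnect(rings, 2)); /* 0 */
        return 0;
    }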
*/ struct task_struct *xenblkd; unsigned int waiting_reqs; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 1f3dfaa54d87..998915174bb8 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) init_waitqueue_head(&ring->shutdown_wq); ring->blkif = blkif; ring->st_print = jiffies; - xen_blkif_get(blkif); + ring->active = true; } return 0; @@ -249,6 +249,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) struct xen_blkif_ring *ring = &blkif->rings[r]; unsigned int i = 0; + if (!ring->active) + continue; + if (ring->xenblkd) { kthread_stop(ring->xenblkd); wake_up(&ring->shutdown_wq); @@ -296,7 +299,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) BUG_ON(ring->free_pages_num != 0); BUG_ON(ring->persistent_gnt_c != 0); WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - xen_blkif_put(blkif); + ring->active = false; } blkif->nr_ring_pages = 0; /* From cc1ec769b87c7dea9092fb537c512e0b6b7a5dac Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Mon, 22 May 2017 03:15:31 -0700 Subject: [PATCH 206/436] RDMA/bnxt_re: Fixing the Control path command and response handling Fixing a concurrency issue with creq handling. Each caller was given a globally managed crsq element, which was accessed outside a lock. This could result in corruption, if lot of applications are simultaneously issuing Control Path commands. Now, each caller will provide its own response buffer and the responses will be copied under a lock. Also, Fixing the queue full condition check for the CMDQ. As a part of these changes, the control path code is refactored to remove the code replication in the response status checking. 
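The scheme described above can be sketched generically as follows. This is an illustration only, not bnxt_re code: every identifier in it (ctrl_chan, cmd_slot, submit_cmd, complete_cmd, MAX_OUTSTANDING, COOKIE_MASK) is made up, and it only shows the idea of a caller-provided response buffer that the completion handler fills in, matched by command cookie, under the command-queue lock.

    /* Illustrative sketch only, NOT the bnxt_re implementation. */
    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    #define MAX_OUTSTANDING 128
    #define COOKIE_MASK     0x7fff

    struct cmd_slot {
            void *resp;     /* caller-owned response buffer; NULL means the slot is free */
            u16 cookie;     /* cookie placed in the request, echoed back by firmware */
    };

    struct ctrl_chan {
            spinlock_t lock;
            wait_queue_head_t waitq;
            u16 seq;
            struct cmd_slot slots[MAX_OUTSTANDING];
    };

    /* Submit: reserve a slot and remember the caller's response buffer. */
    static int submit_cmd(struct ctrl_chan *c, void *resp, u16 *cookie)
    {
            unsigned long flags;
            u16 cbit;

            spin_lock_irqsave(&c->lock, flags);
            *cookie = c->seq++ & COOKIE_MASK;
            cbit = *cookie % MAX_OUTSTANDING;
            if (c->slots[cbit].resp) {
                    /* a previous command using this slot is still in flight */
                    spin_unlock_irqrestore(&c->lock, flags);
                    return -EBUSY;
            }
            c->slots[cbit].resp = resp;
            c->slots[cbit].cookie = *cookie;
            /* ... copy the request onto the hardware command queue, ring the doorbell ... */
            spin_unlock_irqrestore(&c->lock, flags);
            return 0;
    }

    /* Completion: copy the event into the matching caller buffer under the same lock. */
    static void complete_cmd(struct ctrl_chan *c, const void *event, size_t len,
                             u16 cookie)
    {
            unsigned long flags;
            u16 cbit = (cookie & COOKIE_MASK) % MAX_OUTSTANDING;

            spin_lock_irqsave(&c->lock, flags);
            if (c->slots[cbit].resp && c->slots[cbit].cookie == (cookie & COOKIE_MASK)) {
                    memcpy(c->slots[cbit].resp, event, len);
                    c->slots[cbit].resp = NULL;     /* slot becomes free again */
            }
            spin_unlock_irqrestore(&c->lock, flags);
            wake_up(&c->waitq);     /* the waiting submitter now finds its buffer filled */
    }

In the real driver, as the hunks below show, the response structure lives on the caller's stack and bnxt_qplib_rcfw_send_message() combines submission with waiting on the cookie bit.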
Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 215 ++++---------- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 312 ++++++++++---------- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 61 ++-- drivers/infiniband/hw/bnxt_re/qplib_res.h | 5 + drivers/infiniband/hw/bnxt_re/qplib_sp.c | 328 +++++---------------- 5 files changed, 317 insertions(+), 604 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 43d08b5e9085..ea9ce4ff8999 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_qp1 req; - struct creq_create_qp1_resp *resp; + struct creq_create_qp1_resp resp; struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *rq = &qp->rq; @@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp1_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; - goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; struct cmdq_create_qp req; - struct creq_create_qp_resp *resp; + struct creq_create_qp_resp resp; struct bnxt_qplib_pbl *pbl; struct sq_psn_search **psn_search_ptr; unsigned long int psn_search, poff = 0; @@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) } req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed"); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; - goto fail; - } - qp->id = 
le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; - struct creq_modify_qp_resp *resp; + struct creq_modify_qp_resp resp; u16 cmd_flags = 0, pkey; u32 temp32[4]; u32 bmask; + int rc; RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); @@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); - resp = (struct creq_modify_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; qp->cur_qp_state = qp->state; return 0; } @@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_query_qp req; - struct creq_query_qp_resp *resp; + struct creq_query_qp_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; struct creq_query_qp_resp_sb *sb; u16 cmd_flags = 0; u32 temp32[4]; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) + return -ENOMEM; + sb = sbuf->sb; + req.qp_cid = cpu_to_le32(qp->id); req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - (void **)&sb, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; /* Extract the context from the side buffer */ qp->state = sb->en_sqd_async_notify_state & CREQ_QUERY_QP_RESP_SB_STATE_MASK; @@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); memcpy(qp->smac, sb->src_mac, 6); qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); - return 0; +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) @@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; 
struct cmdq_destroy_qp req; - struct creq_destroy_qp_resp *resp; + struct creq_destroy_qp_resp resp; unsigned long flags; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); req.qp_cid = cpu_to_le32(qp->id); - resp = (struct creq_destroy_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; /* Must walk the associated CQs to nullified the QP ptr */ spin_lock_irqsave(&qp->scq->hwq.lock, flags); @@ -1483,7 +1404,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_cq req; - struct creq_create_cq_resp *resp; + struct creq_create_cq_resp resp; struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; int rc; @@ -1525,30 +1446,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << CMDQ_CREATE_CQ_CNQ_ID_SFT); - resp = (struct creq_create_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); - rc = -ETIMEDOUT; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; - goto fail; - } - cq->id = le32_to_cpu(resp->xid); + + cq->id = le32_to_cpu(resp.xid); cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; init_waitqueue_head(&cq->waitq); @@ -1566,33 +1469,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_cq req; - struct creq_destroy_cq_resp *resp; + struct creq_destroy_cq_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); req.cq_cid = cpu_to_le32(cq->id); - resp = (struct creq_destroy_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: 
with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; bnxt_qplib_free_hwq(res->pdev, &cq->hwq); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 23fb7260662b..16e42754dbec 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -39,72 +39,55 @@ #include #include #include +#include + #include "roce_hsi.h" #include "qplib_res.h" #include "qplib_rcfw.h" static void bnxt_qplib_service_creq(unsigned long data); /* Hardware communication channel */ -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { u16 cbit; int rc; - cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; - if (!test_bit(cbit, rcfw->cmdq_bitmap)) - dev_warn(&rcfw->pdev->dev, - "QPLIB: CMD bit %d for cookie 0x%x is not set?", - cbit, cookie); - rc = wait_event_timeout(rcfw->waitq, !test_bit(cbit, rcfw->cmdq_bitmap), msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); - if (!rc) { - dev_warn(&rcfw->pdev->dev, - "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", - RCFW_CMD_WAIT_TIME_MS, cookie); - } - - return rc; + return rc ? 0 : -ETIMEDOUT; }; -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { - u32 count = -1; + u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; u16 cbit; - cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; if (!test_bit(cbit, rcfw->cmdq_bitmap)) goto done; do { + mdelay(1); /* 1m sec */ bnxt_qplib_service_creq((unsigned long)rcfw); } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); done: - return count; + return count ? 
0 : -ETIMEDOUT; }; -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block) +static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, + struct creq_base *resp, void *sb, u8 is_block) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_hwq *crsb = &rcfw->crsb; - struct bnxt_qplib_crsqe *crsqe = NULL; - struct bnxt_qplib_crsbe **crsb_ptr; + struct bnxt_qplib_crsq *crsqe; u32 sw_prod, cmdq_prod; - u8 retry_cnt = 0xFF; - dma_addr_t dma_addr; unsigned long flags; u32 size, opcode; u16 cookie, cbit; int pg, idx; u8 *preq; -retry: opcode = req->opcode; if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && @@ -112,63 +95,50 @@ retry: dev_err(&rcfw->pdev->dev, "QPLIB: RCFW not initialized, reject opcode 0x%x", opcode); - return NULL; + return -EINVAL; } if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); - return NULL; + return -EINVAL; } /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ spin_lock_irqsave(&cmdq->lock, flags); - if (req->cmd_size > cmdq->max_elements - - ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & - (cmdq->max_elements - 1))) { + if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); spin_unlock_irqrestore(&cmdq->lock, flags); - - if (!retry_cnt--) - return NULL; - goto retry; + return -EAGAIN; } - retry_cnt = 0xFF; - cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; + cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; if (is_block) cookie |= RCFW_CMD_IS_BLOCKING; + + set_bit(cbit, rcfw->cmdq_bitmap); req->cookie = cpu_to_le16(cookie); - if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW MAX outstanding cmd reached!"); - atomic_dec(&rcfw->seq_num); + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp) { spin_unlock_irqrestore(&cmdq->lock, flags); - - if (!retry_cnt--) - return NULL; - goto retry; + return -EBUSY; } - /* Reserve a resp buffer slot if requested */ - if (req->resp_size && crsbe) { - spin_lock(&crsb->lock); - sw_prod = HWQ_CMP(crsb->prod, crsb); - crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; - *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] - [get_crsb_idx(sw_prod)]; - bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); - req->resp_addr = cpu_to_le64(dma_addr); - crsb->prod++; - spin_unlock(&crsb->lock); + memset(resp, 0, sizeof(*resp)); + crsqe->resp = (struct creq_qp_event *)resp; + crsqe->resp->cookie = req->cookie; + crsqe->req_size = req->cmd_size; + if (req->resp_size && sb) { + struct bnxt_qplib_rcfw_sbuf *sbuf = sb; - req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + - BNXT_QPLIB_CMDQE_UNITS - 1) / - BNXT_QPLIB_CMDQE_UNITS; + req->resp_addr = cpu_to_le64(sbuf->dma_addr); + req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; } + cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; preq = (u8 *)req; size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; @@ -190,23 +160,24 @@ retry: preq += min_t(u32, size, sizeof(*cmdqe)); size -= min_t(u32, size, sizeof(*cmdqe)); cmdq->prod++; + rcfw->seq_num++; } while (size > 0); + rcfw->seq_num++; + cmdq_prod = cmdq->prod; if (rcfw->flags & 
FIRMWARE_FIRST_FLAG) { - /* The very first doorbell write is required to set this flag - * which prompts the FW to reset its internal pointers + /* The very first doorbell write + * is required to set this flag + * which prompts the FW to reset + * its internal pointers */ cmdq_prod |= FIRMWARE_FIRST_FLAG; rcfw->flags &= ~FIRMWARE_FIRST_FLAG; } - sw_prod = HWQ_CMP(crsq->prod, crsq); - crsqe = &crsq->crsq[sw_prod]; - memset(crsqe, 0, sizeof(*crsqe)); - crsq->prod++; - crsqe->req_size = req->cmd_size; /* ring CMDQ DB */ + wmb(); writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + rcfw->cmdq_bar_reg_prod_off); writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + @@ -214,9 +185,56 @@ retry: done: spin_unlock_irqrestore(&cmdq->lock, flags); /* Return the CREQ response pointer */ - return crsqe ? &crsqe->qp_event : NULL; + return 0; } +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, + struct creq_base *resp, + void *sb, u8 is_block) +{ + struct creq_qp_event *evnt = (struct creq_qp_event *)resp; + u16 cookie; + u8 opcode, retry_cnt = 0xFF; + int rc = 0; + + do { + opcode = req->opcode; + rc = __send_message(rcfw, req, resp, sb, is_block); + cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; + if (!rc) + break; + + if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { + /* send failed */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", + cookie, opcode); + return rc; + } + is_block ? mdelay(1) : usleep_range(500, 1000); + + } while (retry_cnt--); + + if (is_block) + rc = __block_for_resp(rcfw, cookie); + else + rc = __wait_for_resp(rcfw, cookie); + if (rc) { + /* timed out */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", + cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + return rc; + } + + if (evnt->status) { + /* failed with status */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", + cookie, opcode, evnt->status); + rc = -EFAULT; + } + + return rc; +} /* Completions */ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, struct creq_func_event *func_event) @@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, struct creq_qp_event *qp_event) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_crsqe *crsqe; - u16 cbit, cookie, blocked = 0; + struct bnxt_qplib_crsq *crsqe; unsigned long flags; - u32 sw_cons; + u16 cbit, blocked = 0; + u16 cookie; + __le16 mcookie; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: @@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, default: /* Command Response */ spin_lock_irqsave(&cmdq->lock, flags); - sw_cons = HWQ_CMP(crsq->cons, crsq); - crsqe = &crsq->crsq[sw_cons]; - crsq->cons++; - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); - - cookie = le16_to_cpu(crsqe->qp_event.cookie); + cookie = le16_to_cpu(qp_event->cookie); + mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp && + crsqe->resp->cookie == mcookie) { + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + crsqe->resp = NULL; + } else { + dev_err(&rcfw->pdev->dev, + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", + crsqe->resp ? "mismatch" : "collision", + crsqe->resp ? 
crsqe->resp->cookie : 0, mcookie); + } if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) dev_warn(&rcfw->pdev->dev, "QPLIB: CMD bit %d was not requested", cbit); - cmdq->cons += crsqe->req_size; - spin_unlock_irqrestore(&cmdq->lock, flags); + crsqe->req_size = 0; + if (!blocked) wake_up(&rcfw->waitq); - break; + spin_unlock_irqrestore(&cmdq->lock, flags); } return 0; } @@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data) struct creq_base *creqe, **creq_ptr; u32 sw_cons, raw_cons; unsigned long flags; - u32 type; + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; - /* Service the CREQ until empty */ + /* Service the CREQ until budget is over */ spin_lock_irqsave(&creq->lock, flags); raw_cons = creq->cons; - while (1) { + while (budget > 0) { sw_cons = HWQ_CMP(raw_cons, creq); creq_ptr = (struct creq_base **)creq->pbl_ptr; creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; @@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data) type = creqe->type & CREQ_BASE_TYPE_MASK; switch (type) { case CREQ_BASE_TYPE_QP_EVENT: - if (!bnxt_qplib_process_qp_event - (rcfw, (struct creq_qp_event *)creqe)) - rcfw->creq_qp_event_processed++; - else { - dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); - dev_warn(&rcfw->pdev->dev, - "QPLIB: type = 0x%x not handled", - type); - } + bnxt_qplib_process_qp_event + (rcfw, (struct creq_qp_event *)creqe); + rcfw->creq_qp_event_processed++; break; case CREQ_BASE_TYPE_FUNC_EVENT: if (!bnxt_qplib_process_func_event @@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data) break; } raw_cons++; + budget--; } + if (creq->cons != raw_cons) { creq->cons = raw_cons; CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, @@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) /* RCFW */ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) { - struct creq_deinitialize_fw_resp *resp; struct cmdq_deinitialize_fw req; + struct creq_deinitialize_fw_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); - resp = (struct creq_deinitialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) - return -EINVAL; - - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) - return -ETIMEDOUT; - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) - return -EFAULT; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; @@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn) { - struct creq_initialize_fw_resp *resp; struct cmdq_initialize_fw req; + struct creq_initialize_fw_resp resp; u16 cmd_flags = 0, level; + int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); @@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, skip_ctx_setup: req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); - resp = (struct creq_initialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != 
le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW failed"); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; } void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); - kfree(rcfw->crsq.crsq); + kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); - rcfw->pdev = NULL; } @@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, goto fail; } - rcfw->crsq.max_elements = rcfw->cmdq.max_elements; - rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, - sizeof(*rcfw->crsq.crsq), GFP_KERNEL); - if (!rcfw->crsq.crsq) + rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); + if (!rcfw->crsqe_tbl) goto fail; - rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, - &rcfw->crsb.max_elements, - BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, - HWQ_TYPE_CTX)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: HW channel CRSB allocation failed"); - goto fail; - } return 0; fail: @@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, int rc; /* General */ - atomic_set(&rcfw->seq_num, 0); + rcfw->seq_num = 0; rcfw->flags = FIRMWARE_FIRST_FLAG; bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * sizeof(unsigned long)); @@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; - /* CRSQ */ - rcfw->crsq.prod = 0; - rcfw->crsq.cons = 0; - /* CREQ */ rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); @@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); return 0; } + +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size) +{ + struct bnxt_qplib_rcfw_sbuf *sbuf; + + sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); + if (!sbuf) + return NULL; + + sbuf->size = size; + sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, + &sbuf->dma_addr, GFP_ATOMIC); + if (!sbuf->sb) + goto bail; + + return sbuf; +bail: + kfree(sbuf); + return NULL; +} + +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf) +{ + if (sbuf->sb) + dma_free_coherent(&rcfw->pdev->dev, sbuf->size, + sbuf->sb, sbuf->dma_addr); + kfree(sbuf); +} diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index d3567d75bf58..09ce121770cd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -73,6 +73,7 @@ #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT #define RCFW_MAX_COOKIE_VALUE 0x7FFF #define RCFW_CMD_IS_BLOCKING 0x8000 +#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 /* Cmdq contains a fix number of a 16-Byte slots */ struct bnxt_qplib_cmdqe { @@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe { u8 data[1024]; }; -/* CRSQ SB */ -#define BNXT_QPLIB_CRSBE_MAX_CNT 4 -#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) -#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) - -#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) -#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) - -static inline u32 
get_crsb_pg(u32 val) -{ - return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; -} - -static inline u32 get_crsb_idx(u32 val) -{ - return val & MAX_CRSB_IDX_PER_PG; -} - -static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, - u32 prod, dma_addr_t *dma_addr) -{ - *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; - *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * - BNXT_QPLIB_CRSBE_UNITS; -} - /* CREQ */ /* Allocate 1 per QP for async error notification for now */ #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) @@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val) #define CREQ_DB(db, raw_cons, cp_bit) \ writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) +#define CREQ_ENTRY_POLL_BUDGET 0x100 + /* HWQ */ -struct bnxt_qplib_crsqe { - struct creq_qp_event qp_event; + +struct bnxt_qplib_crsq { + struct creq_qp_event *resp; u32 req_size; }; -struct bnxt_qplib_crsq { - struct bnxt_qplib_crsqe *crsq; - u32 prod; - u32 cons; - u32 max_elements; +struct bnxt_qplib_rcfw_sbuf { + void *sb; + dma_addr_t dma_addr; + u32 size; }; /* RCFW Communication Channels */ @@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw { wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); - atomic_t seq_num; + u32 seq_num; /* Bar region info */ void __iomem *cmdq_bar_reg_iomem; @@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw { /* Actual Cmd and Resp Queues */ struct bnxt_qplib_hwq cmdq; - struct bnxt_qplib_crsq crsq; - struct bnxt_qplib_hwq crsb; + struct bnxt_qplib_crsq *crsqe_tbl; }; void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); @@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, (struct bnxt_qplib_rcfw *, struct creq_func_event *)); -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block); +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size); +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf); +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, struct creq_base *resp, + void *sbuf, u8 is_block); int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 6277d802ca4b..4103e602b058 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -48,6 +48,11 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) +#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ + ((HWQ_CMP(hwq->prod, hwq)\ + - HWQ_CMP(hwq->cons, hwq))\ + & (hwq->max_elements - 1))) + enum bnxt_qplib_hwq_type { HWQ_TYPE_CTX, HWQ_TYPE_QUEUE, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 7b31eccedf11..cf7c9cb185d5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr) { struct cmdq_query_func req; - struct creq_query_func_resp *resp; + struct creq_query_func_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; 
struct creq_query_func_resp_sb *sb; u16 cmd_flags = 0; u32 temp; u8 *tqm_alloc; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); - req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_func_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, - 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) { dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; + "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); + return -ENOMEM; } + + sb = sbuf->sb; + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; + /* Extract the context from the side buffer */ attr->max_qp = le32_to_cpu(sb->max_qp); attr->max_qp_rd_atom = @@ -130,7 +123,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); } - return 0; + +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } /* SGID */ @@ -178,8 +174,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, /* Remove GID from the SGID table */ if (update) { struct cmdq_delete_gid req; - struct creq_delete_gid_resp *resp; + struct creq_delete_gid_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); if (sgid_tbl->hw_id[index] == 0xFFFF) { @@ -188,31 +185,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return -EINVAL; } req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); - resp = (struct creq_delete_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, - 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; } memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero)); @@ -234,7 +210,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_res, sgid_tbl); struct bnxt_qplib_rcfw *rcfw = res->rcfw; - int i, free_idx, rc = 0; + int i, free_idx; if (!sgid_tbl) { dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); @@ -266,10 +242,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } if (update) { struct cmdq_add_gid req; - struct creq_add_gid_resp *resp; + struct creq_add_gid_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; 
RCFW_CMD_PREP(req, ADD_GID, cmd_flags); @@ -290,31 +267,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, req.src_mac[1] = cpu_to_be16(temp16[1]); req.src_mac[2] = cpu_to_be16(temp16[2]); - resp = (struct creq_add_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: ADD_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPIB: SP: ADD_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); } /* Add GID to the sgid_tbl */ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); @@ -325,7 +282,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, *index = free_idx; /* unlock */ - return rc; + return 0; } /* pkeys */ @@ -422,10 +379,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_ah req; - struct creq_create_ah_resp *resp; + struct creq_create_ah_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); @@ -450,28 +408,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) req.dest_mac[1] = cpu_to_le16(temp16[1]); req.dest_mac[2] = cpu_to_le16(temp16[2]); - resp = (struct creq_create_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - ah->id = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; + + ah->id = le32_to_cpu(resp.xid); return 0; } @@ -479,35 +421,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_ah req; - struct creq_destroy_ah_resp *resp; + struct creq_destroy_ah_resp resp; u16 cmd_flags = 0; + int rc; /* Clean up the AH table in the device */ RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); req.ah_cid = cpu_to_le32(ah->id); - resp = (struct creq_destroy_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); - return -ETIMEDOUT; - } - if 
(resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; return 0; } @@ -516,8 +442,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deallocate_key req; - struct creq_deallocate_key_resp *resp; + struct creq_deallocate_key_resp resp; u16 cmd_flags = 0; + int rc; if (mrw->lkey == 0xFFFFFFFF) { dev_info(&res->pdev->dev, @@ -536,27 +463,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) else req.key = cpu_to_le32(mrw->lkey); - resp = (struct creq_deallocate_key_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; + /* Free the qplib's MRW memory */ if (mrw->hwq.max_elements) bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); @@ -568,9 +479,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_allocate_mrw req; - struct creq_allocate_mrw_resp *resp; + struct creq_allocate_mrw_resp resp; u16 cmd_flags = 0; unsigned long tmp; + int rc; RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); @@ -584,33 +496,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) tmp = (unsigned long)mrw; req.mrw_handle = cpu_to_le64(tmp); - resp = (struct creq_allocate_mrw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) - mrw->rkey = le32_to_cpu(resp->xid); + mrw->rkey = le32_to_cpu(resp.xid); else - mrw->lkey = le32_to_cpu(resp->xid); + mrw->lkey = le32_to_cpu(resp.xid); return 0; } @@ -619,40 +515,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct 
bnxt_qplib_mrw *mrw, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deregister_mr req; - struct creq_deregister_mr_resp *resp; + struct creq_deregister_mr_resp resp; u16 cmd_flags = 0; int rc; RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); req.lkey = cpu_to_le32(mrw->lkey); - resp = (struct creq_deregister_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); - return -EINVAL; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) + return rc; /* Free the qplib's MR memory */ if (mrw->hwq.max_elements) { @@ -669,7 +542,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_register_mr req; - struct creq_register_mr_resp *resp; + struct creq_register_mr_resp resp; u16 cmd_flags = 0, level; int pg_ptrs, pages, i, rc; dma_addr_t **pbl_ptr; @@ -730,36 +603,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, req.key = cpu_to_le32(mr->lkey); req.mr_size = cpu_to_le64(mr->total_size); - resp = (struct creq_register_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&res->pdev->dev, "SP: REG_MR send failed"); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) goto fail; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "SP: REG_MR timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; - goto fail; - } + return 0; fail: @@ -804,35 +652,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_map_tc_to_cos req; - struct creq_map_tc_to_cos_resp *resp; + struct creq_map_tc_to_cos_resp resp; u16 cmd_flags = 0; - int tleft; + int rc = 0; RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); req.cos0 = cpu_to_le16(cids[0]); req.cos1 = cpu_to_le16(cids[1]); - resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); - return -EINVAL; - } - - tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); - if (!tleft) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); - return -ETIMEDOUT; - } - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: 
SP: MAP_TC2COS failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); return 0; } From 71df1d7ccad1c36f7321d6b3b48f2ea42681c363 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:48 +0200 Subject: [PATCH 207/436] xen/blkback: don't free be structure too early MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The be structure must not be freed when freeing the blkif structure isn't done. Otherwise a use-after-free of be when unmapping the ring used for communicating with the frontend will occur in case of a late call of xenblk_disconnect() (e.g. due to an I/O still active when trying to disconnect). Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/xenbus.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 998915174bb8..4cdf0490983e 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -315,9 +315,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) static void xen_blkif_free(struct xen_blkif *blkif) { - - xen_blkif_disconnect(blkif); + WARN_ON(xen_blkif_disconnect(blkif)); xen_vbd_free(&blkif->vbd); + kfree(blkif->be->mode); + kfree(blkif->be); /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); @@ -514,8 +515,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) xen_blkif_put(be->blkif); } - kfree(be->mode); - kfree(be); return 0; } From 20a3d5bf5e5b13c02450ab6178ec374abd830686 Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Tue, 13 Jun 2017 20:06:12 +0200 Subject: [PATCH 208/436] caif: Add sockaddr length check before accessing sa_family in connect handler Verify that the caller-provided sockaddr structure is large enough to contain the sa_family field, before accessing it in the connect() handler of the AF_CAIF socket. Since the syscall doesn't enforce a minimum size of the corresponding memory region, very short sockaddrs (zero or one byte long) result in operating on uninitialized memory while referencing sa_family. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. Miller --- net/caif/caif_socket.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index adcad344c843..21f18ea2fce4 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, lock_sock(sk); + err = -EINVAL; + if (addr_len < offsetofend(struct sockaddr, sa_family)) + goto out; + err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out; From a24fa22ce22ae302b3bf8f7008896d52d5d57b8d Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:49 +0200 Subject: [PATCH 209/436] xen/blkback: don't use xen_blkif_get() in xen-blkback kthread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no need to use xen_blkif_get()/xen_blkif_put() in the kthread of xen-blkback. 
Thread stopping is synchronous and using the blkif reference counting in the kthread will avoid to ever let the reference count drop to zero at the end of an I/O running concurrent to disconnecting and multiple rings. Setting ring->xenblkd to NULL after stopping the kthread isn't needed as the kthread does this already. Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 3 --- drivers/block/xen-blkback/xenbus.c | 1 - 2 files changed, 4 deletions(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 726c32e35db9..6b14c509f3c7 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg) unsigned long timeout; int ret; - xen_blkif_get(blkif); - set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) @@ -665,7 +663,6 @@ purge_gnt_list: print_stats(ring); ring->xenblkd = NULL; - xen_blkif_put(blkif); return 0; } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 4cdf0490983e..792da683e70d 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -255,7 +255,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (ring->xenblkd) { kthread_stop(ring->xenblkd); wake_up(&ring->shutdown_wq); - ring->xenblkd = NULL; } /* The above kthread_stop() guarantees that at this point we From 089bc0143f489bd3a4578bdff5f4ca68fb26f341 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 13 Jun 2017 16:28:27 -0400 Subject: [PATCH 210/436] xen-blkback: don't leak stack data via response ring Rather than constructing a local structure instance on the stack, fill the fields directly on the shared ring, just like other backends do. Build on the fact that all response structure flavors are actually identical (the old code did make this assumption too). This is XSA-216. Cc: stable@vger.kernel.org Signed-off-by: Jan Beulich Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 23 ++++++++++++----------- drivers/block/xen-blkback/common.h | 25 +++++-------------------- 2 files changed, 17 insertions(+), 31 deletions(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 6b14c509f3c7..0e824091a12f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1433,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, static void make_response(struct xen_blkif_ring *ring, u64 id, unsigned short op, int st) { - struct blkif_response resp; + struct blkif_response *resp; unsigned long flags; union blkif_back_rings *blk_rings; int notify; - resp.id = id; - resp.operation = op; - resp.status = st; - spin_lock_irqsave(&ring->blk_ring_lock, flags); blk_rings = &ring->blk_rings; /* Place on the response ring for the relevant domain. 
*/ switch (ring->blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->native, + blk_rings->native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_32, + blk_rings->x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_64, + blk_rings->x86_64.rsp_prod_pvt); break; default: BUG(); } + + resp->id = id; + resp->operation = op; + resp->status = st; + blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&ring->blk_ring_lock, flags); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 638597b17a38..ecb35fe8ca8d 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues; struct blkif_common_request { char dummy; }; -struct blkif_common_response { - char dummy; -}; + +/* i386 protocol version */ struct blkif_x86_32_request_rw { uint8_t nr_segments; /* number of segments */ @@ -129,14 +128,6 @@ struct blkif_x86_32_request { } u; } __attribute__((__packed__)); -/* i386 protocol version */ -#pragma pack(push, 4) -struct blkif_x86_32_response { - uint64_t id; /* copied from request */ - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; -#pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request_rw { @@ -193,18 +184,12 @@ struct blkif_x86_64_request { } u; } __attribute__((__packed__)); -struct blkif_x86_64_response { - uint64_t __attribute__((__aligned__(8))) id; - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; - DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, - struct blkif_common_response); + struct blkif_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, - struct blkif_x86_32_response); + struct blkif_response __packed); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, - struct blkif_x86_64_response); + struct blkif_response); union blkif_back_rings { struct blkif_back_ring native; From 377aa6b0efbaa29cfeecd8b9244641217f9544ca Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Wed, 14 Jun 2017 14:47:50 +1000 Subject: [PATCH 211/436] powerpc/npu-dma: Remove spurious WARN_ON when a PCI device has no of_node Commit 4c3b89effc28 ("powerpc/powernv: Add sanity checks to pnv_pci_get_{gpu|npu}_dev") introduced explicit warnings in pnv_pci_get_npu_dev() when a PCIe device has no associated device-tree node. However not all PCIe devices have an of_node and pnv_pci_get_npu_dev() gets indirectly called at least once for every PCIe device in the system. This results in spurious WARN_ON()'s so remove it. The same situation should not exist for pnv_pci_get_gpu_dev() as any NPU based PCIe device requires a device-tree node. 
Fixes: 4c3b89effc28 ("powerpc/powernv: Add sanity checks to pnv_pci_get_{gpu|npu}_dev") Reported-by: Alexey Kardashevskiy Signed-off-by: Alistair Popple Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/npu-dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 78fa9395b8c5..e6f444b46207 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index) if (WARN_ON(!gpdev)) return NULL; - if (WARN_ON(!gpdev->dev.of_node)) + /* Not all PCI devices have device-tree nodes */ + if (!gpdev->dev.of_node) return NULL; /* Get assoicated PCI device */ From 8bfb3676606454ffec836f56c5dc3e69dfc0956a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 14 Jun 2017 09:17:38 +0200 Subject: [PATCH 212/436] wireless: wext: remove ndo_do_ioctl fallback There are no longer any drivers (in the tree proper, I didn't check all the staging drivers) that take WEXT ioctls through this API, the only remaining ones that even have ndo_do_ioctl are using it only for private ioctls. Therefore, we can remove this call. Signed-off-by: Johannes Berg --- net/wireless/wext-core.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 1a4db6790e20..24ba8a99b946 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -957,9 +957,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, else if (private) return private(dev, iwr, cmd, info, handler); } - /* Old driver API : call driver ioctl handler */ - if (dev->netdev_ops->ndo_do_ioctl) - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); return -EOPNOTSUPP; } From e79b0006c45c9b0b22f3ea54ff6e256b34c1f208 Mon Sep 17 00:00:00 2001 From: Megha Dey Date: Wed, 14 Jun 2017 09:51:56 +0530 Subject: [PATCH 213/436] ALSA: hda - Add Coffelake PCI ID Coffelake is another Intel part, so need to add PCI ID for it. Signed-off-by: Megha Dey Signed-off-by: Subhransu S. 
Prusty Acked-by: Vinod Koul Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1770f085c2a6..e3c696c46a21 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -371,9 +371,10 @@ enum { #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) +#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ - IS_GLK(pci) + IS_GLK(pci) || IS_CFL(pci) static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", @@ -2378,6 +2379,9 @@ static const struct pci_device_id azx_ids[] = { /* Kabylake-H */ { PCI_DEVICE(0x8086, 0xa2f0), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Coffelake */ + { PCI_DEVICE(0x8086, 0xa348), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE}, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, From c544ad18bd47ee989eb433f09b6574a7f6d415c7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 14 Jun 2017 12:40:36 +0200 Subject: [PATCH 214/436] video: fbdev: avoid int-in-bool-context warning gcc-7 suspects this code might be wrong because we use the result of a multiplication as a bool: drivers/video/fbdev/core/fbmon.c: In function 'fb_edid_add_monspecs': drivers/video/fbdev/core/fbmon.c:1051:84: error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context] It's actually fine, so let's add a comparison to zero to make that clear to the compiler too. Signed-off-by: Arnd Bergmann Cc: Andrew Morton Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/video/fbdev/core/fbmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 687ebb053438..41d7979d81c5 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) - if (PIXEL_CLOCK) + if (PIXEL_CLOCK != 0) edt[num++] = block - edid; /* Yikes, EDID data is totally useless */ From 1235185521ba0d9528052578e27c2f4999d489c6 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 14 Jun 2017 12:40:36 +0200 Subject: [PATCH 215/436] video: fbdev: add missing USB-descriptor endianness conversions Add the missing endianness conversions when printing the USB device-descriptor idVendor, idProduct and bcdDevice fields during probe. 
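A minimal sketch of the conversion pattern, using placeholder names (dev, udev) rather than the drivers' own variables: the USB descriptor fields are stored as little-endian __le16, so they must go through le16_to_cpu() before being formatted, otherwise the printed values come out byte-swapped on big-endian hosts.

    dev_dbg(dev, "vid_%04x&pid_%04x&rev_%04x\n",
            le16_to_cpu(udev->descriptor.idVendor),
            le16_to_cpu(udev->descriptor.idProduct),
            le16_to_cpu(udev->descriptor.bcdDevice));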
Signed-off-by: Johan Hovold Cc: Steve Glendinning Cc: Bernie Thompson Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/video/fbdev/smscufx.c | 5 +++-- drivers/video/fbdev/udlfb.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index ec2e7e353685..449fceaf79d5 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface, dev_dbg(dev->gdev, "%s %s - serial #%s\n", usbdev->manufacturer, usbdev->product, usbdev->serial); dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", - usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, - usbdev->descriptor.bcdDevice, dev); + le16_to_cpu(usbdev->descriptor.idVendor), + le16_to_cpu(usbdev->descriptor.idProduct), + le16_to_cpu(usbdev->descriptor.bcdDevice), dev); dev_dbg(dev->gdev, "console enable=%d\n", console); dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 6a3c353de7c3..a868cbb51a34 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface, pr_info("%s %s - serial #%s\n", usbdev->manufacturer, usbdev->product, usbdev->serial); pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", - usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, - usbdev->descriptor.bcdDevice, dev); + le16_to_cpu(usbdev->descriptor.idVendor), + le16_to_cpu(usbdev->descriptor.idProduct), + le16_to_cpu(usbdev->descriptor.bcdDevice), dev); pr_info("console enable=%d\n", console); pr_info("fb_defio enable=%d\n", fb_defio); pr_info("shadow enable=%d\n", shadow); From 484c7bbf2649831714da3a0fa30213977458e9b5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 14 Jun 2017 12:40:36 +0200 Subject: [PATCH 216/436] video: fbdev: via: remove possibly unused variables When CONFIG_PROC_FS is disabled, we get warnings about unused variables as remove_proc_entry() evaluates to an empty macro. drivers/video/fbdev/via/viafbdev.c: In function 'viafb_remove_proc': drivers/video/fbdev/via/viafbdev.c:1635:4: error: unused variable 'iga2_entry' [-Werror=unused-variable] drivers/video/fbdev/via/viafbdev.c:1634:4: error: unused variable 'iga1_entry' [-Werror=unused-variable] These are easy to avoid by using the pointer from the structure. 
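For background, a rough approximation of why the warning appears at all (this stub is paraphrased, not copied from proc_fs.h): with CONFIG_PROC_FS=n the call expands to an empty macro, its arguments are never evaluated, and any local used only as an argument gets reported as unused.

    #ifndef CONFIG_PROC_FS
    /* approximate no-op stub when procfs is disabled */
    #define remove_proc_entry(name, parent) do { } while (0)
    #endif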
Signed-off-by: Arnd Bergmann Cc: Florian Tobias Schandinat Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/video/fbdev/via/viafbdev.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index f9718f012aae..badee04ef496 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c @@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared) } static void viafb_remove_proc(struct viafb_shared *shared) { - struct proc_dir_entry *viafb_entry = shared->proc_entry, - *iga1_entry = shared->iga1_proc_entry, - *iga2_entry = shared->iga2_proc_entry; + struct proc_dir_entry *viafb_entry = shared->proc_entry; if (!viafb_entry) return; - remove_proc_entry("output_devices", iga2_entry); + remove_proc_entry("output_devices", shared->iga2_proc_entry); remove_proc_entry("iga2", viafb_entry); - remove_proc_entry("output_devices", iga1_entry); + remove_proc_entry("output_devices", shared->iga1_proc_entry); remove_proc_entry("iga1", viafb_entry); remove_proc_entry("supported_output_devices", viafb_entry); From b429f96aa5cb4023b915de9c2aa6e7720a939856 Mon Sep 17 00:00:00 2001 From: Mike Gerow Date: Wed, 14 Jun 2017 12:40:36 +0200 Subject: [PATCH 217/436] video: fbdev: udlfb: drop log level for blanking Drop log level for blanking from info to debug. Xorg likes to habitually unblank when already unblanked and this can fill up logs over a long period of time. Signed-off-by: Mike Gerow Cc: bernie@plugable.com Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/video/fbdev/udlfb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index a868cbb51a34..05ef657235df 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info) char *bufptr; struct urb *urb; - pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", - info->node, dev->blank_mode, blank_mode); + pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n", + info->node, dev->blank_mode, blank_mode); if ((dev->blank_mode == FB_BLANK_POWERDOWN) && (blank_mode != FB_BLANK_POWERDOWN)) { From ed6456afef0da57edfd87ce9142813d08a571137 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Jun 2017 12:10:56 +0300 Subject: [PATCH 218/436] Staging: rtl8723bs: fix an error code in isFileReadable() The caller only cares about zero vs non-zero so this code actually works fine but we should be returning a negative error code instead of a valid pointer casted to int. 
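As a reminder of the convention the fix restores, a small illustrative fragment (simplified from isFileReadable(), not copied verbatim): PTR_ERR() is only meaningful on a pointer that IS_ERR() reports as an encoded error; on a valid struct file pointer it merely truncates the pointer value, so a plain errno is the right thing to return.

    fp = filp_open(path, O_RDONLY, 0);
    if (IS_ERR(fp))
        return PTR_ERR(fp);     /* error pointer: decode it */

    if (readFile(fp, &buf, 1) != 1)
        ret = -EINVAL;          /* fp is valid here: return a real errno */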
Fixes: 554c0a3abf21 ("staging: Add rtl8723bs sdio wifi driver") Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8723bs/os_dep/osdep_service.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c index 02db59e8b593..aa16d1ab955b 100644 --- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c +++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c @@ -160,7 +160,7 @@ static int isFileReadable(char *path) oldfs = get_fs(); set_fs(get_ds()); if (1!=readFile(fp, &buf, 1)) - ret = PTR_ERR(fp); + ret = -EINVAL; set_fs(oldfs); filp_close(fp, NULL); From 4f39a1f5870104b1670df2c09c831ac281896545 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 14 Jun 2017 09:21:58 +0200 Subject: [PATCH 219/436] wireless: wext: use struct iwreq earlier in the call chain To make it clear that we never use struct ifreq, cast from it directly in the wext entrypoint and use struct iwreq from there on. The next patch will remove the cast again and pass the correct struct from the beginning. Signed-off-by: Johannes Berg --- net/wireless/wext-core.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 24ba8a99b946..12949c8d3e5f 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev) * Main IOCTl dispatcher. * Check the type of IOCTL and call the appropriate wrapper... */ -static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, +static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { - struct iwreq *iwr = (struct iwreq *) ifr; struct net_device *dev; iw_handler handler; @@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, * The copy_to/from_user() of ifr is also dealt with in there */ /* Make sure the device exist */ - if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) + if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) return -ENODEV; /* A bunch of special cases, then the generic case... 
@@ -974,7 +973,7 @@ static int wext_permission_check(unsigned int cmd) } /* entry point from dev ioctl */ -static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, +static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) @@ -984,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, if (ret) return ret; - dev_load(net, ifr->ifr_name); + dev_load(net, iwr->ifr_name); rtnl_lock(); - ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); + ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); rtnl_unlock(); return ret; @@ -1042,7 +1041,7 @@ int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, struct iw_request_info info = { .cmd = cmd, .flags = 0 }; int ret; - ret = wext_ioctl_dispatch(net, ifr, cmd, &info, + ret = wext_ioctl_dispatch(net, (void *)ifr, cmd, &info, ioctl_standard_call, ioctl_private_call); if (ret >= 0 && @@ -1104,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, info.cmd = cmd; info.flags = IW_REQUEST_FLAG_COMPAT; - ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, + ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, compat_standard_call, compat_private_call); From 68dd02d19c811ca8ea60220a9d73e13b4bdad73a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 14 Jun 2017 09:28:11 +0200 Subject: [PATCH 220/436] dev_ioctl: copy only the smaller struct iwreq for wext Unfortunately, struct iwreq isn't a proper subset of struct ifreq, but is still handled by the same code path. Robert reported that then applications may (randomly) fault if the struct iwreq they pass happens to land within 8 bytes of the end of a mapping (the struct is only 32 bytes, vs. struct ifreq's 40 bytes). To fix this, pull out the code handling wireless extension ioctls and copy only the smaller structure in this case. This bug goes back a long time, I tracked that it was introduced into mainline in 2.1.15, over 20 years ago! 
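To make the mismatch concrete, an illustrative fragment (the 32/40 byte figures are the ones quoted above; exact sizes depend on the ABI): struct iwreq shares the leading ifr_name layout with struct ifreq but is 8 bytes shorter, so copying sizeof(struct ifreq) from a pointer that really refers to a struct iwreq can read past the end of the caller's mapping.

    struct iwreq iwr;   /* 32 bytes, vs. 40 for struct ifreq */

    if (copy_from_user(&iwr, arg, sizeof(iwr)))     /* copy only what the caller owns */
        return -EFAULT;
    return wext_handle_ioctl(net, &iwr, cmd, arg);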
This fixes https://bugzilla.kernel.org/show_bug.cgi?id=195869 Reported-by: Robert O'Callahan Signed-off-by: Johannes Berg --- include/net/wext.h | 4 ++-- net/core/dev_ioctl.c | 19 ++++++++++++++++--- net/wireless/wext-core.c | 6 +++--- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/include/net/wext.h b/include/net/wext.h index 345911965dbb..454ff763eeba 100644 --- a/include/net/wext.h +++ b/include/net/wext.h @@ -6,7 +6,7 @@ struct net; #ifdef CONFIG_WEXT_CORE -int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, +int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, void __user *arg); int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, unsigned long arg); @@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, struct iw_statistics *get_wireless_stats(struct net_device *dev); int call_commit_handler(struct net_device *dev); #else -static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, +static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, void __user *arg) { return -EINVAL; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b94b1d293506..27fad31784a8 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) if (cmd == SIOCGIFNAME) return dev_ifname(net, (struct ifreq __user *)arg); + /* + * Take care of Wireless Extensions. Unfortunately struct iwreq + * isn't a proper subset of struct ifreq (it's 8 byte shorter) + * so we need to treat it specially, otherwise applications may + * fault if the struct they're passing happens to land at the + * end of a mapped page. + */ + if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { + struct iwreq iwr; + + if (copy_from_user(&iwr, arg, sizeof(iwr))) + return -EFAULT; + + return wext_handle_ioctl(net, &iwr, cmd, arg); + } + if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; @@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) ret = -EFAULT; return ret; } - /* Take care of Wireless Extensions */ - if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) - return wext_handle_ioctl(net, &ifr, cmd, arg); return -ENOTTY; } } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 12949c8d3e5f..6cdb054484d6 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -1035,18 +1035,18 @@ static int ioctl_standard_call(struct net_device * dev, } -int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, +int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, void __user *arg) { struct iw_request_info info = { .cmd = cmd, .flags = 0 }; int ret; - ret = wext_ioctl_dispatch(net, (void *)ifr, cmd, &info, + ret = wext_ioctl_dispatch(net, iwr, cmd, &info, ioctl_standard_call, ioctl_private_call); if (ret >= 0 && IW_IS_GET(cmd) && - copy_to_user(arg, ifr, sizeof(struct iwreq))) + copy_to_user(arg, iwr, sizeof(struct iwreq))) return -EFAULT; return ret; From c0f83da96b906ea331e2eea2bd5d2e7f221d27ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 18 May 2017 10:31:01 +0200 Subject: [PATCH 221/436] drm/radeon: fix "force the UVD DPB into VRAM as well" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DPB must be in VRAM, but not in the first segment. 
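For clarity, a small illustration of the segment check involved (variable names are placeholders, not the driver's): shifting a GPU address right by 28 bits yields its 256MB segment index, so the comparison rejects buffers that are not in the same 256MB segment as the UVD area; with this change the DPB command (cmd == 1) is simply no longer subject to that check, since the DPB only has to live in VRAM.

    /* 1 << 28 == 256MB, so addr >> 28 is the 256MB segment index */
    if ((cmd == 0 || cmd == 0x3) &&
        (start >> 28) != (uvd_gpu_addr >> 28))
        return -EINVAL;         /* msg/fb buffer outside the UVD segment */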
Signed-off-by: Christian König Tested-by: Arthur Marsh Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_uvd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 7431eb4a11b7..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, } /* TODO: is this still necessary on NI+ ? */ - if ((cmd == 0 || cmd == 1 || cmd == 0x3) && + if ((cmd == 0 || cmd == 0x3) && (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); From bea10413934dcf98cb9b2dfcdc56e1d28f192897 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Tue, 13 Jun 2017 07:17:10 +0200 Subject: [PATCH 222/436] drm/amdgpu: Fix overflow of watermark calcs at > 4k resolutions. Commit d63c277dc672e0 ("drm/amdgpu: Make display watermark calculations more accurate") made watermark calculations more accurate, but not for > 4k resolutions on 32-Bit architectures, as it introduced an integer overflow for those setups and resolutions. Fix this by proper u64 casting and division. Signed-off-by: Mario Kleiner Reported-by: Ben Hutchings Fixes: d63c277dc672 ("drm/amdgpu: Make display watermark calculations more accurate") Cc: Ben Hutchings Cc: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7 +++++-- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 0cdeb6a2e4a0..5dffa27afa45 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 773654a19749..47bbc87f96d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 
1f3552967ba3..d8c9a959493e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, fixed20_12 a, b, c; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3c558c170e5e..db30c6ba563a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { From 55f61a040e1b1ea0ba962e53ae341b4c51915bd1 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Tue, 13 Jun 2017 07:17:11 +0200 Subject: [PATCH 223/436] drm/radeon: Fix overflow of watermark calcs at > 4k resolutions. Commit e6b9a6c84b93 ("drm/radeon: Make display watermark calculations more accurate") made watermark calculations more accurate, but not for > 4k resolutions on 32-Bit architectures, as it introduced an integer overflow for those setups and resolutions. Fix this by proper u64 casting and division. 
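A short worked example of the overflow being fixed (the 5K timing numbers are illustrative, not taken from the patch): on 32-bit architectures unsigned long is 32 bits wide, so the old intermediate product wraps for large modes, e.g. 1000000UL * 5500 = 5,500,000,000, which exceeds UINT_MAX (4,294,967,295); promoting one operand to u64 and dividing with div_u64() keeps the full product.

    line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
                              (u32)mode->clock);
    line_time = min(line_time, (u32)65535);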
Signed-off-by: Mario Kleiner Reported-by: Ben Hutchings Fixes: e6b9a6c84b93 ("drm/radeon: Make display watermark calculations more accurate") Cc: Ben Hutchings Cc: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 7 +++++-- drivers/gpu/drm/radeon/evergreen.c | 7 +++++-- drivers/gpu/drm/radeon/si.c | 7 +++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 008c145b7f29..ca44233ceacc 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev, u32 tmp, wm_mask; if (radeon_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if ((rdev->pm.pm_method == PM_METHOD_DPM) && diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0bf103536404..534637203e70 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, fixed20_12 a, b, c; if (radeon_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; dram_channels = evergreen_get_number_of_dram_channels(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 76d1888528e6..5303f25d5280 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev, fixed20_12 a, b, c; if (radeon_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; From 2deaeaf102d692cb6f764123b1df7aa118a8e97c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 14 Jun 2017 16:20:32 +0200 Subject: [PATCH 224/436] ALSA: pcm: Don't treat NULL chmap as a fatal error The standard PCM chmap helper callbacks treat the NULL info->chmap as a fatal error and spews the kernel warning with stack trace when CONFIG_SND_DEBUG is on. This was OK, originally it was supposed to be always static and non-NULL. But, as the recent addition of Intel LPE audio driver shows, the chmap content may vary dynamically, and it can be even NULL when disconnected. The user still sees the kernel warning unnecessarily. 
For clearing such a confusion, this patch simply removes the snd_BUG_ON() in each place, just returns an error without warning. Cc: # v4.11+ Signed-off-by: Takashi Iwai --- sound/core/pcm_lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 5088d4b8db22..009e6c98754e 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_pcm_substream *substream; const struct snd_pcm_chmap_elem *map; - if (snd_BUG_ON(!info->chmap)) + if (!info->chmap) return -EINVAL; substream = snd_pcm_chmap_substream(info, idx); if (!substream) @@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int __user *dst; int c, count = 0; - if (snd_BUG_ON(!info->chmap)) + if (!info->chmap) return -EINVAL; if (size < 8) return -ENOMEM; From 9152e0b722b29092115da3bfbf63d26be1a461df Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Wed, 14 Jun 2017 03:26:23 -0700 Subject: [PATCH 225/436] RDMA/bnxt_re: HW workarounds for handling specific conditions This patch implements the following HW workarounds 1. The SQ depth needs to be augmented by 128 + 1 to avoid running into an Out of order CQE issue 2. Workaround to handle the problem where the HW fast path engine continues to access DMA memory in retranmission mode even after the WQE has already been completed. If the HW reports this condition, driver detects it and posts a Fence WQE. The driver stops reporting the completions to stack until it receives completion for Fence WQE. Signed-off-by: Eddie Wai Signed-off-by: Sriharsha Basavapatna Signed-off-by: Selvin Xavier Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 373 +++++++++++++++++++--- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 15 + drivers/infiniband/hw/bnxt_re/qplib_fp.c | 166 ++++++++-- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 17 +- drivers/infiniband/hw/bnxt_re/qplib_res.h | 1 - drivers/infiniband/hw/bnxt_re/qplib_sp.c | 5 + drivers/infiniband/hw/bnxt_re/qplib_sp.h | 2 + 7 files changed, 509 insertions(+), 70 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7ba9e699d7ab..169798752895 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -61,6 +61,48 @@ #include "ib_verbs.h" #include +static int __from_ib_access_flags(int iflags) +{ + int qflags = 0; + + if (iflags & IB_ACCESS_LOCAL_WRITE) + qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; + if (iflags & IB_ACCESS_REMOTE_READ) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; + if (iflags & IB_ACCESS_REMOTE_WRITE) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; + if (iflags & IB_ACCESS_REMOTE_ATOMIC) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; + if (iflags & IB_ACCESS_MW_BIND) + qflags |= BNXT_QPLIB_ACCESS_MW_BIND; + if (iflags & IB_ZERO_BASED) + qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; + if (iflags & IB_ACCESS_ON_DEMAND) + qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; + return qflags; +}; + +static enum ib_access_flags __to_ib_access_flags(int qflags) +{ + enum ib_access_flags iflags = 0; + + if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) + iflags |= IB_ACCESS_LOCAL_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) + iflags |= IB_ACCESS_REMOTE_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) + iflags |= IB_ACCESS_REMOTE_READ; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) + iflags |= IB_ACCESS_REMOTE_ATOMIC; 
+ if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) + iflags |= IB_ACCESS_MW_BIND; + if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) + iflags |= IB_ZERO_BASED; + if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) + iflags |= IB_ACCESS_ON_DEMAND; + return iflags; +}; + static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, struct bnxt_qplib_sge *sg_list, int num) { @@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } +#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) + +static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct ib_mr *ib_mr = &fence->mr->ib_mr; + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; + + memset(wqe, 0, sizeof(*wqe)); + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + wqe->bind.zero_based = false; + wqe->bind.parent_l_key = ib_mr->lkey; + wqe->bind.va = (u64)(unsigned long)fence->va; + wqe->bind.length = fence->size; + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; + + /* Save the initial rkey in fence structure for now; + * wqe->bind.r_key will be set at (re)bind time. + */ + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); +} + +static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) +{ + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, + qplib_qp); + struct ib_pd *ib_pd = qp->ib_qp.pd; + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; + struct bnxt_qplib_swqe wqe; + int rc; + + memcpy(&wqe, fence_wqe, sizeof(wqe)); + wqe.bind.r_key = fence->bind_rkey; + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); + + dev_dbg(rdev_to_dev(qp->rdev), + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", + wqe.bind.r_key, qp->qplib_qp.id, pd); + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); + return rc; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); + + return rc; +} + +static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = fence->mr; + + if (fence->mw) { + bnxt_re_dealloc_mw(fence->mw); + fence->mw = NULL; + } + if (mr) { + if (mr->ib_mr.rkey) + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, + true); + if (mr->ib_mr.lkey) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + kfree(mr); + fence->mr = NULL; + } + if (fence->dma_addr) { + dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + fence->dma_addr = 0; + } +} + +static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) +{ + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = NULL; + dma_addr_t dma_addr = 0; + struct ib_mw *mw; + u64 pbl_tbl; + int rc; + + dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + rc = dma_mapping_error(dev, dma_addr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); + rc = -EIO; + 
fence->dma_addr = 0; + goto fail; + } + fence->dma_addr = dma_addr; + + /* Allocate a MR */ + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + rc = -ENOMEM; + goto fail; + } + fence->mr = mr; + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); + goto fail; + } + + /* Register MR */ + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->qplib_mr.va = (u64)(unsigned long)fence->va; + mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; + pbl_tbl = dma_addr; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, + BNXT_RE_FENCE_PBL_SIZE, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); + goto fail; + } + mr->ib_mr.rkey = mr->qplib_mr.rkey; + + /* Create a fence MW only for kernel consumers */ + mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); + if (!mw) { + dev_err(rdev_to_dev(rdev), + "Failed to create fence-MW for PD: %p\n", pd); + rc = -EINVAL; + goto fail; + } + fence->mw = mw; + + bnxt_re_create_fence_wqe(pd); + return 0; + +fail: + bnxt_re_destroy_fence_mr(pd); + return rc; +} + /* Protection Domains */ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) { @@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) struct bnxt_re_dev *rdev = pd->rdev; int rc; + bnxt_re_destroy_fence_mr(pd); if (ib_pd->uobject && pd->dpi.dbr) { struct ib_ucontext *ib_uctx = ib_pd->uobject->context; struct bnxt_re_ucontext *ucntx; @@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, } } + if (!udata) + if (bnxt_re_create_fence_mr(pd)) + dev_warn(rdev_to_dev(rdev), + "Failed to create Fence-MR\n"); return &pd->ib_pd; dbfail: (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, @@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp /* Shadow QP SQ depth should be same as QP1 RQ depth */ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_sge = 2; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.sq.q_full_delta = 1; qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.rcq = qp1_qp->rcq; qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.rq.q_full_delta = 1; qp->qplib_qp.mtu = qp1_qp->mtu; @@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
true : false); - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; @@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_init_attr->cap.max_recv_wr; + qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); if (qp_init_attr->qp_type == IB_QPT_GSI) { + /* Allocate 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_init_attr->cap.max_send_wr; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, } } else { + /* Allocate 128 + 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; + + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. 
But allowing this to avoid + * unexpected Queue full condition + */ + + qp->qplib_qp.sq.q_full_delta -= 1; + qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; if (udata) { @@ -1129,48 +1353,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) } } -static int __from_ib_access_flags(int iflags) -{ - int qflags = 0; - - if (iflags & IB_ACCESS_LOCAL_WRITE) - qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; - if (iflags & IB_ACCESS_REMOTE_READ) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; - if (iflags & IB_ACCESS_REMOTE_WRITE) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; - if (iflags & IB_ACCESS_REMOTE_ATOMIC) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; - if (iflags & IB_ACCESS_MW_BIND) - qflags |= BNXT_QPLIB_ACCESS_MW_BIND; - if (iflags & IB_ZERO_BASED) - qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; - if (iflags & IB_ACCESS_ON_DEMAND) - qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; - return qflags; -}; - -static enum ib_access_flags __to_ib_access_flags(int qflags) -{ - enum ib_access_flags iflags = 0; - - if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) - iflags |= IB_ACCESS_LOCAL_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) - iflags |= IB_ACCESS_REMOTE_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) - iflags |= IB_ACCESS_REMOTE_READ; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) - iflags |= IB_ACCESS_REMOTE_ATOMIC; - if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) - iflags |= IB_ACCESS_MW_BIND; - if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) - iflags |= IB_ZERO_BASED; - if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) - iflags |= IB_ACCESS_ON_DEMAND; - return iflags; -}; - static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) @@ -1378,11 +1560,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_attr->cap.max_send_wr; + /* + * Reserving one slot for Phantom WQE. Some application can + * post one extra entry in this case. 
Allowing this to avoid + * unexpected Queue full condition + */ + qp->qplib_qp.sq.q_full_delta -= 1; qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; if (qp->qplib_qp.rq.max_wqe) { entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; } else { /* SRQ was used prior, just ignore the RQ caps */ @@ -2643,12 +2835,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } +static int send_phantom_wqe(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + + rc = bnxt_re_bind_fence_mw(lib_qp); + if (!rc) { + lib_qp->sq.phantom_wqe_cnt++; + dev_dbg(&lib_qp->sq.hwq.pdev->dev, + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", + lib_qp->id, lib_qp->sq.hwq.prod, + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), + lib_qp->sq.phantom_wqe_cnt); + } + + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} + int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); struct bnxt_re_qp *qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; + struct bnxt_qplib_q *sq; + struct bnxt_qplib_qp *lib_qp; u32 tbl_idx; struct bnxt_re_sqp_entries *sqp_entry = NULL; unsigned long flags; @@ -2661,7 +2877,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) } cqe = &cq->cql[0]; while (budget) { - ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); + lib_qp = NULL; + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); + if (lib_qp) { + sq = &lib_qp->sq; + if (sq->send_phantom) { + qp = container_of(lib_qp, + struct bnxt_re_qp, qplib_qp); + if (send_phantom_wqe(qp) == -ENOMEM) + dev_err(rdev_to_dev(cq->rdev), + "Phantom failed! Scheduled to send again\n"); + else + sq->send_phantom = false; + } + } + if (!ncqe) break; @@ -2914,6 +3144,55 @@ fail: return ERR_PTR(rc); } +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata) +{ + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_mw *mw; + int rc; + + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) + return ERR_PTR(-ENOMEM); + mw->rdev = rdev; + mw->qplib_mw.pd = &pd->qplib_pd; + + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); + goto fail; + } + mw->ib_mw.rkey = mw->qplib_mw.rkey; + + atomic_inc(&rdev->mw_count); + return &mw->ib_mw; + +fail: + kfree(mw); + return ERR_PTR(rc); +} + +int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) +{ + struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); + struct bnxt_re_dev *rdev = mw->rdev; + int rc; + + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); + return rc; + } + + kfree(mw); + atomic_dec(&rdev->mw_count); + return rc; +} + /* Fast Memory Regions */ struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 5c3d71765454..25a4c8f4d939 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx { u32 refcnt; }; +#define BNXT_RE_FENCE_BYTES 64 +struct bnxt_re_fence_data { + u32 size; + u8 va[BNXT_RE_FENCE_BYTES]; + dma_addr_t dma_addr; + struct bnxt_re_mr *mr; + struct ib_mw *mw; + struct bnxt_qplib_swqe bind_wqe; + u32 bind_rkey; +}; + struct bnxt_re_pd { struct bnxt_re_dev *rdev; struct ib_pd ib_pd; struct bnxt_qplib_pd qplib_pd; struct bnxt_qplib_dpi dpi; + struct bnxt_re_fence_data fence; }; struct bnxt_re_ah { @@ -181,6 +193,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, u32 max_num_sg); int bnxt_re_dereg_mr(struct ib_mr *mr); +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata); +int bnxt_re_dealloc_mw(struct ib_mw *mw); struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr); int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index ea9ce4ff8999..31593fdf6365 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1083,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == - HWQ_CMP(sq->hwq.cons, &sq->hwq)) { + + if (bnxt_qplib_queue_full(sq)) { + dev_err(&sq->hwq.pdev->dev, + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", + sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, + sq->q_full_delta); rc = -ENOMEM; goto done; } @@ -1332,8 +1336,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == - HWQ_CMP(rq->hwq.cons, &rq->hwq)) { + if (bnxt_qplib_queue_full(rq)) { dev_err(&rq->hwq.pdev->dev, "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); rc = -EINVAL; @@ -1551,14 +1554,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, return rc; } +/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 + */ +static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, + u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) +{ + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_swq *swq; + u32 
peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; + struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; + struct cq_req *peek_req_hwcqe; + struct bnxt_qplib_qp *peek_qp; + struct bnxt_qplib_q *peek_sq; + int i, rc = 0; + + /* Normal mode */ + /* Check for the psn_search marking before completing */ + swq = &sq->swq[sw_sq_cons]; + if (swq->psn_search && + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { + /* Unmark */ + swq->psn_search->flags_next_psn = cpu_to_le32 + (le32_to_cpu(swq->psn_search->flags_next_psn) + & ~0x80000000); + dev_dbg(&cq->hwq.pdev->dev, + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + sq->condition = true; + sq->send_phantom = true; + + /* TODO: Only ARM if the previous SQE is ARMALL */ + bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); + + rc = -EAGAIN; + goto out; + } + if (sq->condition) { + /* Peek at the completions */ + peek_raw_cq_cons = cq->hwq.cons; + peek_sw_cq_cons = cq_cons; + i = cq->hwq.max_elements; + while (i--) { + peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); + peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; + peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] + [CQE_IDX(peek_sw_cq_cons)]; + /* If the next hwcqe is VALID */ + if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, + cq->hwq.max_elements)) { + /* If the next hwcqe is a REQ */ + if ((peek_hwcqe->cqe_type_toggle & + CQ_BASE_CQE_TYPE_MASK) == + CQ_BASE_CQE_TYPE_REQ) { + peek_req_hwcqe = (struct cq_req *) + peek_hwcqe; + peek_qp = (struct bnxt_qplib_qp *) + ((unsigned long) + le64_to_cpu + (peek_req_hwcqe->qp_handle)); + peek_sq = &peek_qp->sq; + peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( + peek_req_hwcqe->sq_cons_idx) - 1 + , &sq->hwq); + /* If the hwcqe's sq's wr_id matches */ + if (peek_sq == sq && + sq->swq[peek_sq_cons_idx].wr_id == + BNXT_QPLIB_FENCE_WRID) { + /* + * Unbreak only if the phantom + * comes back + */ + dev_dbg(&cq->hwq.pdev->dev, + "FP:Got Phantom CQE"); + sq->condition = false; + sq->single = true; + rc = 0; + goto out; + } + } + /* Valid but not the phantom, so keep looping */ + } else { + /* Not valid yet, just exit and wait */ + rc = -EINVAL; + goto out; + } + peek_sw_cq_cons++; + peek_raw_cq_cons++; + } + dev_err(&cq->hwq.pdev->dev, + "Should not have come here! 
cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + rc = -EINVAL; + } +out: + return rc; +} + static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, struct cq_req *hwcqe, - struct bnxt_qplib_cqe **pcqe, int *budget) + struct bnxt_qplib_cqe **pcqe, int *budget, + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) { struct bnxt_qplib_qp *qp; struct bnxt_qplib_q *sq; struct bnxt_qplib_cqe *cqe; - u32 sw_cons, cqe_cons; + u32 sw_sq_cons, cqe_sq_cons; + struct bnxt_qplib_swq *swq; int rc = 0; qp = (struct bnxt_qplib_qp *)((unsigned long) @@ -1570,13 +1672,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, } sq = &qp->sq; - cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); - if (cqe_cons > sq->hwq.max_elements) { + cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); + if (cqe_sq_cons > sq->hwq.max_elements) { dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process req reported "); dev_err(&cq->hwq.pdev->dev, "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", - cqe_cons, sq->hwq.max_elements); + cqe_sq_cons, sq->hwq.max_elements); return -EINVAL; } /* If we were in the middle of flushing the SQ, continue */ @@ -1585,53 +1687,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Require to walk the sq's swq to fabricate CQEs for all previously * signaled SWQEs due to CQE aggregation from the current sq cons - * to the cqe_cons + * to the cqe_sq_cons */ cqe = *pcqe; while (*budget) { - sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); - if (sw_cons == cqe_cons) + sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); + if (sw_sq_cons == cqe_sq_cons) + /* Done */ break; + + swq = &sq->swq[sw_sq_cons]; memset(cqe, 0, sizeof(*cqe)); cqe->opcode = CQ_BASE_CQE_TYPE_REQ; cqe->qp_handle = (u64)(unsigned long)qp; cqe->src_qp = qp->id; - cqe->wr_id = sq->swq[sw_cons].wr_id; - cqe->type = sq->swq[sw_cons].type; + cqe->wr_id = swq->wr_id; + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) + goto skip; + cqe->type = swq->type; /* For the last CQE, check for status. 
For errors, regardless * of the request being signaled or not, it must complete with * the hwcqe error status */ - if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && + if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && hwcqe->status != CQ_REQ_STATUS_OK) { cqe->status = hwcqe->status; dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Req "); dev_err(&cq->hwq.pdev->dev, "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", - sw_cons, cqe->wr_id, cqe->status); + sw_sq_cons, cqe->wr_id, cqe->status); cqe++; (*budget)--; sq->flush_in_progress = true; /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; + sq->condition = false; + sq->single = false; } else { - if (sq->swq[sw_cons].flags & - SQ_SEND_FLAGS_SIGNAL_COMP) { + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; } } +skip: sq->hwq.cons++; + if (sq->single) + break; } +out: *pcqe = cqe; - if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { + if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { /* Out of budget */ rc = -EAGAIN; goto done; } + /* + * Back to normal completion mode only after it has completed all of + * the WC for this CQE + */ + sq->single = false; if (!sq->flush_in_progress) goto done; flush: @@ -1961,7 +2084,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, } int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, - int num_cqes) + int num_cqes, struct bnxt_qplib_qp **lib_qp) { struct cq_base *hw_cqe, **hw_cqe_ptr; unsigned long flags; @@ -1986,7 +2109,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, case CQ_BASE_CQE_TYPE_REQ: rc = bnxt_qplib_cq_process_req(cq, (struct cq_req *)hw_cqe, - &cqe, &budget); + &cqe, &budget, + sw_cons, lib_qp); break; case CQ_BASE_CQE_TYPE_RES_RC: rc = bnxt_qplib_cq_process_res_rc(cq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f0150f8da1e3..71539ea7f421 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -88,6 +88,7 @@ struct bnxt_qplib_swq { struct bnxt_qplib_swqe { /* General */ +#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ u64 wr_id; u8 reqs_type; u8 type; @@ -216,9 +217,16 @@ struct bnxt_qplib_q { struct scatterlist *sglist; u32 nmap; u32 max_wqe; + u16 q_full_delta; u16 max_sge; u32 psn; bool flush_in_progress; + bool condition; + bool single; + bool send_phantom; + u32 phantom_wqe_cnt; + u32 phantom_cqe_cnt; + u32 next_cq_cons; }; struct bnxt_qplib_qp { @@ -301,6 +309,13 @@ struct bnxt_qplib_qp { (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ !((raw_cons) & (cp_bit))) +static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) +{ + return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), + &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, + &qplib_q->hwq); +} + struct bnxt_qplib_cqe { u8 status; u8 type; @@ -432,7 +447,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, - int num); + int num, struct bnxt_qplib_qp **qp); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_free_nq(struct 
bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 4103e602b058..2e4855509719 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -52,7 +52,6 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; ((HWQ_CMP(hwq->prod, hwq)\ - HWQ_CMP(hwq->cons, hwq))\ & (hwq->max_elements - 1))) - enum bnxt_qplib_hwq_type { HWQ_TYPE_CTX, HWQ_TYPE_QUEUE, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index cf7c9cb185d5..fde18cf0e406 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -88,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); + /* + * 128 WQEs needs to be reserved for the HW (8916). Prevent + * reporting the max number + */ + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; attr->max_qp_sges = sb->max_sge; attr->max_cq = le32_to_cpu(sb->max_cq); attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 1442a617e968..a543f959098b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -40,6 +40,8 @@ #ifndef __BNXT_QPLIB_SP_H__ #define __BNXT_QPLIB_SP_H__ +#define BNXT_QPLIB_RESERVED_QP_WRS 128 + struct bnxt_qplib_dev_attr { char fw_ver[32]; u16 max_sgid; From 1c980b010f06696c9093c5d6a5ac7b5145f89a04 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Mon, 22 May 2017 03:15:34 -0700 Subject: [PATCH 226/436] RDMA/bnxt_re: Dereg MR in FW before freeing the fast_reg_page_list If the host buffers are freed before destroying MR in HW, HW could try accessing these buffers. This could cause a host crash. Fixing the code to avoid this condition. Signed-off-by: Selvin Xavier Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 169798752895..d94b1b304135 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3052,6 +3052,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) struct bnxt_re_dev *rdev = mr->rdev; int rc; + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } + if (mr->npages && mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); @@ -3059,8 +3065,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) mr->npages = 0; mr->pages = NULL; } - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (!IS_ERR_OR_NULL(mr->ib_umem)) ib_umem_release(mr->ib_umem); From 3fb755b3d58084001c89e5f0fd558552bdef9051 Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Mon, 22 May 2017 03:15:36 -0700 Subject: [PATCH 227/436] RDMA/bnxt_re: Add HW workaround for avoiding stall for UD QPs HW stalls out after 0x800000 WQEs are posted for UD QPs. To workaround this problem, driver will send a modify_qp cmd to the HW at around the halfway mark(0x400000) so that FW can accordingly modify the QP context in the HW to prevent this stall. 
This workaround needs to be done for UD, QP1 and Raw Ethertype packets. Added a counter to keep track of WQEs posted during post_send. Signed-off-by: Somnath Kotur Signed-off-by: Selvin Xavier Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2 ++ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 18 ++++++++++++++++++ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 3 +++ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1 + 4 files changed, 24 insertions(+) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ebf7be8d4139..d5e457ee7e7b 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -56,6 +56,8 @@ #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) +#define BNXT_RE_UD_QP_HW_STALL 0x400000 + struct bnxt_re_work { struct work_struct work; unsigned long event; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index d94b1b304135..08e7e59df28c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2075,6 +2075,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, return payload_sz; } +static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) +{ + if ((qp->ib_qp.qp_type == IB_QPT_UD || + qp->ib_qp.qp_type == IB_QPT_GSI || + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { + int qp_attr_mask; + struct ib_qp_attr qp_attr; + + qp_attr_mask = IB_QP_STATE; + qp_attr.qp_state = IB_QPS_RTS; + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); + qp->qplib_qp.wqe_cnt = 0; + } +} + static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, struct ib_send_wr *wr) @@ -2120,6 +2136,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } @@ -2216,6 +2233,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 31593fdf6365..f05500bcdcf1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1298,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, } sq->hwq.prod++; + + qp->wqe_cnt++; + done: return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 71539ea7f421..36b7b7db0e3f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -250,6 +250,7 @@ struct bnxt_qplib_qp { u8 timeout; u8 retry_cnt; u8 rnr_retry; + u64 wqe_cnt; u32 min_rnr_timer; u32 max_rd_atomic; u32 max_dest_rd_atomic; From 018cf5995d69d25be7330c3b8bef4c31bf2b273b Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Mon, 22 May 2017 03:15:40 -0700 Subject: [PATCH 228/436] RDMA/bnxt_re: Fix RQE posting logic This patch adds code to ring RQ Doorbell aggressively so that the adapter can DMA RQ buffers sooner, instead of DMA all WQEs in the post_recv WR list together at the end of the post_recv verb. 
Also use spinlock to serialize RQ posting Signed-off-by: Kalesh AP Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2 ++ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 18 +++++++++++++++++- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 1 + 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index d5e457ee7e7b..08772836fded 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -58,6 +58,8 @@ #define BNXT_RE_UD_QP_HW_STALL 0x400000 +#define BNXT_RE_RQ_WQE_THRESHOLD 32 + struct bnxt_re_work { struct work_struct work; unsigned long event; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 08e7e59df28c..491932e70638 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1249,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->ib_qp.qp_num = qp->qplib_qp.id; spin_lock_init(&qp->sq_lock); + spin_lock_init(&qp->rq_lock); if (udata) { struct bnxt_re_qp_resp resp; @@ -2281,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; + unsigned long flags; + u32 count = 0; + spin_lock_irqsave(&qp->rq_lock, flags); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); @@ -2310,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, *bad_wr = wr; break; } + + /* Ring DB if the RQEs posted reaches a threshold value */ + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { + bnxt_qplib_post_recv_db(&qp->qplib_qp); + count = 0; + } + wr = wr->next; } - bnxt_qplib_post_recv_db(&qp->qplib_qp); + + if (count) + bnxt_qplib_post_recv_db(&qp->qplib_qp); + + spin_unlock_irqrestore(&qp->rq_lock, flags); + return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 25a4c8f4d939..16f62dc456f6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -74,6 +74,7 @@ struct bnxt_re_qp { struct bnxt_re_dev *rdev; struct ib_qp ib_qp; spinlock_t sq_lock; /* protect sq */ + spinlock_t rq_lock; /* protect rq */ struct bnxt_qplib_qp qplib_qp; struct ib_umem *sumem; struct ib_umem *rumem; From 86816a00ca57caf7e4bf5d30e20eb683f5a3ae35 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Mon, 22 May 2017 03:15:44 -0700 Subject: [PATCH 229/436] RDMA/bnxt_re: Remove FMR support Some issues observed with FMR implementation while running stress traffic. So removing the FMR verbs support for now. 
Signed-off-by: Selvin Xavier Acked-by: Christoph Hellwig Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 98 +----------------------- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 6 -- drivers/infiniband/hw/bnxt_re/main.c | 4 - 3 files changed, 2 insertions(+), 106 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 491932e70638..c7bd68311d0c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -191,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_total_mcast_qp_attach = 0; ib_attr->max_ah = dev_attr->max_ah; - ib_attr->max_fmr = dev_attr->max_fmr; - ib_attr->max_map_per_fmr = 1; /* ? */ + ib_attr->max_fmr = 0; + ib_attr->max_map_per_fmr = 0; ib_attr->max_srq = dev_attr->max_srq; ib_attr->max_srq_wr = dev_attr->max_srq_wqes; @@ -3231,100 +3231,6 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) return rc; } -/* Fast Memory Regions */ -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr) -{ - struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); - struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_re_fmr *fmr; - int rc; - - if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || - fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { - dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); - return ERR_PTR(-ENOMEM); - } - fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); - if (!fmr) - return ERR_PTR(-ENOMEM); - - fmr->rdev = rdev; - fmr->qplib_fmr.pd = &pd->qplib_pd; - fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; - - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) - goto fail; - - fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); - fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; - fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; - - atomic_inc(&rdev->mr_count); - return &fmr->ib_fmr; -fail: - kfree(fmr); - return ERR_PTR(rc); -} - -int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, - u64 iova) -{ - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; - int rc; - - fmr->qplib_fmr.va = iova; - fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; - - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, - list_len, true); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", - fmr->ib_fmr.lkey); - return rc; -} - -int bnxt_re_unmap_fmr(struct list_head *fmr_list) -{ - struct bnxt_re_dev *rdev; - struct bnxt_re_fmr *fmr; - struct ib_fmr *ib_fmr; - int rc = 0; - - /* Validate each FMRs inside the fmr_list */ - list_for_each_entry(ib_fmr, fmr_list, list) { - fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); - rdev = fmr->rdev; - - if (rdev) { - rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, - &fmr->qplib_fmr, true); - if (rc) - break; - } - } - return rc; -} - -int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) -{ - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; - int rc; - - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to free FMR"); - - kfree(fmr); - atomic_dec(&rdev->mr_count); - return rc; -} - /* uverbs */ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, diff --git 
a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 16f62dc456f6..6c160f6a5398 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -197,12 +197,6 @@ int bnxt_re_dereg_mr(struct ib_mr *mr); struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, struct ib_udata *udata); int bnxt_re_dealloc_mw(struct ib_mw *mw); -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr); -int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, - u64 iova); -int bnxt_re_unmap_fmr(struct list_head *fmr_list); -int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5d355401179b..1fce5e73216b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ibdev->dereg_mr = bnxt_re_dereg_mr; ibdev->alloc_mr = bnxt_re_alloc_mr; ibdev->map_mr_sg = bnxt_re_map_mr_sg; - ibdev->alloc_fmr = bnxt_re_alloc_fmr; - ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; - ibdev->unmap_fmr = bnxt_re_unmap_fmr; - ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; ibdev->reg_user_mr = bnxt_re_reg_user_mr; ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; From dac2738607de3d2bb99bae91c9054a2d753af18b Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 5 Jun 2017 16:32:26 +0300 Subject: [PATCH 230/436] RDMA/qedr: Initialize byte_len in WC of READ and SEND commands Initialize byte_len in work completion of RDMA_READ and RDMA_SEND. Exposed by uDAPL application. Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 17685cfea6a2..7add0cd09412 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3209,6 +3209,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, case IB_WC_REG_MR: qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; break; + case IB_WC_RDMA_READ: + case IB_WC_SEND: + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; + break; default: break; } From e57bb6be5e095351086d3e6de9853a0763342535 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 5 Jun 2017 16:32:27 +0300 Subject: [PATCH 231/436] RDMA/qedr: Add 64KB PAGE_SIZE support to user-space queues Add 64KB PAGE_SIZE support to user-space CQ, SQ and RQ queues. De-facto it means that code was added to translate 64KB pages to smaller 4KB pages that the FW can handle. Otherwise, the FW would wrap (or jump to the next page) when reaching 4KB while the user space library will continue on the same large page. Note that MR code remains as is since the FW supports larger pages for MRs. 
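For the arithmetic only, a minimal standalone sketch (user-space C with invented example values; the FW_PAGE_SHIFT of 12 mirrors the define added above, the rest is simplified and is not the driver's real code):

    #include <stdio.h>
    #include <stdint.h>

    #define FW_PAGE_SHIFT 12        /* FW rings always use 4KB pages */

    int main(void)
    {
        unsigned int umem_page_shift = 16;          /* example: 64KB user pages */
        uint64_t sg_addr = 0x100000;                /* example DMA address of one SG entry */
        uint64_t sg_len = 1ULL << umem_page_shift;  /* one 64KB page long */
        unsigned int fw_pg_per_umem_pg = 1u << (umem_page_shift - FW_PAGE_SHIFT);
        unsigned int pages = sg_len >> umem_page_shift;
        uint64_t pg_addr = sg_addr;

        printf("each 64KB page expands to %u FW pages\n", fw_pg_per_umem_pg);
        for (unsigned int pg = 0; pg < pages; pg++) {
            for (unsigned int fw = 0; fw < fw_pg_per_umem_pg; fw++) {
                /* one PBE per 4KB FW page, address split lo/hi as in the patch */
                printf("pbe lo=0x%08x hi=0x%08x\n",
                       (uint32_t)pg_addr, (uint32_t)(pg_addr >> 32));
                pg_addr += 1u << FW_PAGE_SHIFT;
            }
        }
        return 0;
    }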
Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/qedr.h | 5 ++- drivers/infiniband/hw/qedr/verbs.c | 60 +++++++++++++++++------------- 2 files changed, 39 insertions(+), 26 deletions(-) diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..d961f79b317c 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -58,7 +58,10 @@ #define QEDR_MSG_QP " QP" #define QEDR_MSG_GSI " GSI" -#define QEDR_CQ_MAGIC_NUMBER (0x11223344) +#define QEDR_CQ_MAGIC_NUMBER (0x11223344) + +#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) +#define FW_PAGE_SHIFT (12) struct qedr_dev; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 7add0cd09412..d6723c365c7f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, struct qedr_pbl *pbl, - struct qedr_pbl_info *pbl_info) + struct qedr_pbl_info *pbl_info, u32 pg_shift) { int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; + u32 fw_pg_cnt, fw_pg_per_umem_pg; struct qedr_pbl *pbl_tbl; struct scatterlist *sg; struct regpair *pbe; + u64 pg_addr; int entry; - u32 addr; if (!pbl_info->num_pbes) return; @@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, shift = umem->page_shift; + fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { pages = sg_dma_len(sg) >> shift; + pg_addr = sg_dma_address(sg); for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { - /* store the page address in pbe */ - pbe->lo = cpu_to_le32(sg_dma_address(sg) + - (pg_cnt << shift)); - addr = upper_32_bits(sg_dma_address(sg) + - (pg_cnt << shift)); - pbe->hi = cpu_to_le32(addr); - pbe_cnt++; - total_num_pbes++; - pbe++; + for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { + pbe->lo = cpu_to_le32(pg_addr); + pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); - if (total_num_pbes == pbl_info->num_pbes) - return; + pg_addr += BIT(pg_shift); + pbe_cnt++; + total_num_pbes++; + pbe++; - /* If the given pbl is full storing the pbes, - * move to next pbl. - */ - if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { - pbl_tbl++; - pbe = (struct regpair *)pbl_tbl->va; - pbe_cnt = 0; + if (total_num_pbes == pbl_info->num_pbes) + return; + + /* If the given pbl is full storing the pbes, + * move to next pbl. 
+ */ + if (pbe_cnt == + (pbl_info->pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct regpair *)pbl_tbl->va; + pbe_cnt = 0; + } + + fw_pg_cnt++; } } } @@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, u64 buf_addr, size_t buf_len, int access, int dmasync) { - int page_cnt; + u32 fw_pages; int rc; q->buf_addr = buf_addr; @@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, return PTR_ERR(q->umem); } - page_cnt = ib_umem_page_count(q->umem); - rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); + fw_pages = ib_umem_page_count(q->umem) << + (q->umem->page_shift - FW_PAGE_SHIFT); + + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); if (rc) goto err0; @@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, goto err0; } - qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, + FW_PAGE_SHIFT); return 0; @@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, goto err1; qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, - &mr->info.pbl_info); + &mr->info.pbl_info, mr->umem->page_shift); rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { From 07d432bb97f19dd5e784175152f9fce3b2646133 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 5 Jun 2017 20:23:40 +0800 Subject: [PATCH 232/436] rxe: Fix a sleep-in-atomic bug in post_one_send The driver may sleep under a spin lock, and the function call path is: post_one_send (acquire the lock by spin_lock_irqsave) init_send_wqe copy_from_user --> may sleep There is no flow that makes "qp->is_user" true, and copy_from_user may cause bug when a non-user pointer is used. So the lines of copy_from_user and check of "qp->is_user" are removed. Signed-off-by: Jia-Ju Bai Reviewed-by: Leon Romanovsky Acked-by: Moni Shoua Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_verbs.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 83d709e74dfb..073e66783f1d 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, sge = ibwr->sg_list; for (i = 0; i < num_sge; i++, sge++) { - if (qp->is_user && copy_from_user(p, (__user void *) - (uintptr_t)sge->addr, sge->length)) - return -EFAULT; - - else if (!qp->is_user) - memcpy(p, (void *)(uintptr_t)sge->addr, - sge->length); + memcpy(p, (void *)(uintptr_t)sge->addr, + sge->length); p += sge->length; } From 03f219041fdbeb31cecff41bb1cb4e1018f9cf75 Mon Sep 17 00:00:00 2001 From: Luis Henriques Date: Wed, 17 May 2017 12:21:07 +0100 Subject: [PATCH 233/436] ceph: check i_nlink while converting a file handle to dentry Converting a file handle to a dentry can be done call after the inode unlink. This means that __fh_to_dentry() requires an extra check to verify the number of links is not 0. The issue can be easily reproduced using xfstest generic/426, which does something like: name_to_handle_at(&fh) echo 3 > /proc/sys/vm/drop_caches unlink() open_by_handle_at(&fh) The call to open_by_handle_at() should fail, as the file doesn't exist anymore. 
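The reproducer described above can be written in user space roughly as follows (a hedged sketch: the paths are placeholders, error handling is minimal, and the drop_caches step needs root):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *file = argc > 1 ? argv[1] : "/mnt/ceph/testfile";
        const char *mnt  = argc > 2 ? argv[2] : "/mnt/ceph";
        struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
        int mount_id, mount_fd, fd;

        if (!fh)
            return 1;
        fh->handle_bytes = MAX_HANDLE_SZ;

        /* 1. grab a handle while the file still exists */
        if (name_to_handle_at(AT_FDCWD, file, fh, &mount_id, 0) < 0) {
            perror("name_to_handle_at");
            return 1;
        }
        mount_fd = open(mnt, O_RDONLY | O_DIRECTORY);   /* any fd on that mount */
        if (mount_fd < 0) {
            perror("open mount point");
            return 1;
        }

        /* 2. drop caches (needs root) and unlink the file */
        if (system("echo 3 > /proc/sys/vm/drop_caches") != 0)
            fprintf(stderr, "could not drop caches (not root?)\n");
        unlink(file);

        /* 3. with the fix this fails with ESTALE instead of succeeding */
        fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
        printf("open_by_handle_at: %s\n",
               fd < 0 ? strerror(errno) : "unexpectedly succeeded");
        return 0;
    }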
Link: http://tracker.ceph.com/issues/19958 Signed-off-by: Luis Henriques Reviewed-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/export.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/ceph/export.c b/fs/ceph/export.c index e8f11fa565c5..7df550c13d7f 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -91,6 +91,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) ceph_mdsc_put_request(req); if (!inode) return ERR_PTR(-ESTALE); + if (inode->i_nlink == 0) { + iput(inode); + return ERR_PTR(-ESTALE); + } } return d_obtain_alias(inode); From 56199016e8672feb7b903eda003a863d5bf2b8c4 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 1 Jun 2017 16:44:53 +0800 Subject: [PATCH 234/436] ceph: use current_kernel_time() to get request time stamp ceph uses ktime_get_real_ts() to get request time stamp. In most other cases, current_kernel_time() is used to get time stamp for filesystem operations (called by current_time()). There is granularity difference between ktime_get_real_ts() and current_kernel_time(). The later one can be up to one jiffy behind the former one. This can causes inode's ctime to go back. Signed-off-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/mds_client.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f38e56fa9712..0c05df44cc6c 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1687,7 +1687,6 @@ struct ceph_mds_request * ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) { struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); - struct timespec ts; if (!req) return ERR_PTR(-ENOMEM); @@ -1706,8 +1705,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) init_completion(&req->r_safe_completion); INIT_LIST_HEAD(&req->r_unsafe_item); - ktime_get_real_ts(&ts); - req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran); + req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); req->r_op = op; req->r_direct_mode = mode; From 4ca2fea6f8277ab381bd08b996d641255b6f7b00 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 1 Jun 2017 17:08:00 +0800 Subject: [PATCH 235/436] ceph: unify inode i_ctime update Current __ceph_setattr() can set inode's i_ctime to current_time(), req->r_stamp or attr->ia_ctime. These time stamps may have minor differences. It may cause potential problem. 
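The effect of sampling time from different sources can be seen from user space with the fine vs. coarse realtime clocks; a minimal sketch (assumes Linux clock_gettime(); the truncation helper loosely models timespec_trunc() from the previous patch):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <time.h>

    /* round tv_nsec down to a multiple of the fs time granularity */
    static struct timespec trunc_to_gran(struct timespec t, long gran)
    {
        if (gran == 1000000000L)
            t.tv_nsec = 0;
        else if (gran > 1)
            t.tv_nsec -= t.tv_nsec % gran;
        return t;
    }

    int main(void)
    {
        struct timespec fine, coarse;

        /* rough user-space analogues of ktime_get_real_ts() vs current_kernel_time() */
        clock_gettime(CLOCK_REALTIME, &fine);
        clock_gettime(CLOCK_REALTIME_COARSE, &coarse);
        printf("fine  : %ld.%09ld\n", (long)fine.tv_sec, fine.tv_nsec);
        printf("coarse: %ld.%09ld  (may lag by up to one tick)\n",
               (long)coarse.tv_sec, coarse.tv_nsec);

        fine = trunc_to_gran(fine, 1000);   /* e.g. a 1us granularity fs */
        printf("fine truncated: %ld.%09ld\n", (long)fine.tv_sec, fine.tv_nsec);
        return 0;
    }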
Signed-off-by: "Yan, Zheng" Acked-by: Arnd Bergmann Signed-off-by: Ilya Dryomov --- fs/ceph/acl.c | 1 + fs/ceph/inode.c | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 987044bca1c2..59cb307b15fb 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -131,6 +131,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) } if (new_mode != old_mode) { + newattrs.ia_ctime = current_time(inode); newattrs.ia_mode = new_mode; newattrs.ia_valid = ATTR_MODE; ret = __ceph_setattr(inode, &newattrs); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index dcce79b84406..4de6cdddf059 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -2022,7 +2022,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) attr->ia_size > inode->i_size) { i_size_write(inode, attr->ia_size); inode->i_blocks = calc_inode_blocks(attr->ia_size); - inode->i_ctime = attr->ia_ctime; ci->i_reported_size = attr->ia_size; dirtied |= CEPH_CAP_FILE_EXCL; } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || @@ -2044,7 +2043,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, only ? "ctime only" : "ignored"); - inode->i_ctime = attr->ia_ctime; if (only) { /* * if kernel wants to dirty ctime but nothing else, @@ -2067,7 +2065,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) if (dirtied) { inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, &prealloc_cf); - inode->i_ctime = current_time(inode); + inode->i_ctime = attr->ia_ctime; } release &= issued; @@ -2085,6 +2083,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) req->r_inode_drop = release; req->r_args.setattr.mask = cpu_to_le32(mask); req->r_num_caps = 1; + req->r_stamp = attr->ia_ctime; err = ceph_mdsc_do_request(mdsc, NULL, req); } dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, From 74030603dfd9f76c0f279f19f1dd1ee3028fee7a Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 13 Jun 2017 13:36:24 -0700 Subject: [PATCH 236/436] net_sched: move tcf_lock down after gen_replace_estimator() Laura reported a sleep-in-atomic kernel warning inside tcf_act_police_init() which calls gen_replace_estimator() with spinlock protection. It is not necessary in this case, we already have RTNL lock here so it is enough to protect concurrent writers. For the reader, i.e. tcf_act_police(), it needs to make decision based on this rate estimator, in the worst case we drop more/less packets than necessary while changing the rate in parallel, it is still acceptable. Reported-by: Laura Abbott Reported-by: Nick Huber Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_police.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/sched/act_police.c b/net/sched/act_police.c index f42008b29311..b062bc80c7cb 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, } } - spin_lock_bh(&police->tcf_lock); if (est) { err = gen_replace_estimator(&police->tcf_bstats, NULL, &police->tcf_rate_est, &police->tcf_lock, NULL, est); if (err) - goto failure_unlock; + goto failure; } else if (tb[TCA_POLICE_AVRATE] && (ret == ACT_P_CREATED || !gen_estimator_active(&police->tcf_rate_est))) { err = -EINVAL; - goto failure_unlock; + goto failure; } + spin_lock_bh(&police->tcf_lock); /* No failure allowed after this point */ police->tcfp_mtu = parm->mtu; if (police->tcfp_mtu == 0) { @@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, return ret; -failure_unlock: - spin_unlock_bh(&police->tcf_lock); failure: qdisc_put_rtab(P_tab); qdisc_put_rtab(R_tab); From 7a1ac110c22eb726684c837544a2d42c33e07be7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 9 Jun 2017 16:54:28 -0300 Subject: [PATCH 237/436] perf evsel: Fix probing of precise_ip level for default cycles event Since commit 18e7a45af91a ("perf/x86: Reject non sampling events with precise_ip") returns -EINVAL for sys_perf_event_open() with an attribute with (attr.precise_ip > 0 && attr.sample_period == 0), just like is done in the routine used to probe the max precise level when no events were passed to 'perf record' or 'perf top', i.e.: perf_evsel__new_cycles() perf_event_attr__set_max_precise_ip() The x86 code, in x86_pmu_hw_config(), which is called all the way from sys_perf_event_open() did, starting with the aforementioned commit: /* There's no sense in having PEBS for non sampling events: */ if (!is_sampling_event(event)) return -EINVAL; Which makes it fail for cycles:ppp, cycles:pp and cycles:p, always using just the non precise cycles variant. To make sure that this is the case, I tested it, before this patch, with: # perf probe -L x86_pmu_hw_config 0 int x86_pmu_hw_config(struct perf_event *event) 1 { 2 if (event->attr.precise_ip) { 17 if (event->attr.precise_ip > precise) 18 return -EOPNOTSUPP; /* There's no sense in having PEBS for non sampling events: */ 21 if (!is_sampling_event(event)) 22 return -EINVAL; } # perf probe x86_pmu_hw_config:22 Added new events: probe:x86_pmu_hw_config (on x86_pmu_hw_config:22) probe:x86_pmu_hw_config_1 (on x86_pmu_hw_config:22) You can now use it in all perf tools, such as: perf record -e probe:x86_pmu_hw_config_1 -aR sleep 1 # perf trace -e perf_event_open,probe:x86_pmu_hwconfig*/max-stack=16/ perf record usleep 1 0.000 ( 0.015 ms): perf/4150 perf_event_open(attr_uptr: 0x7ffebc8ba110, cpu: -1, group_fd: -1 ) ... 
0.015 ( ): probe:x86_pmu_hw_config:(ffffffff9c0065e1)) x86_pmu_hw_config ([kernel.kallsyms]) hsw_hw_config ([kernel.kallsyms]) x86_pmu_event_init ([kernel.kallsyms]) perf_try_init_event ([kernel.kallsyms]) perf_event_alloc ([kernel.kallsyms]) SYSC_perf_event_open ([kernel.kallsyms]) sys_perf_event_open ([kernel.kallsyms]) do_syscall_64 ([kernel.kallsyms]) return_from_SYSCALL_64 ([kernel.kallsyms]) syscall (/usr/lib64/libc-2.24.so) perf_event_attr__set_max_precise_ip (/home/acme/bin/perf) perf_evsel__new_cycles (/home/acme/bin/perf) perf_evlist__add_default (/home/acme/bin/perf) cmd_record (/home/acme/bin/perf) run_builtin (/home/acme/bin/perf) handle_internal_command (/home/acme/bin/perf) 0.000 ( 0.021 ms): perf/4150 ... [continued]: perf_event_open()) = -1 EINVAL Invalid argument 0.023 ( 0.002 ms): perf/4150 perf_event_open(attr_uptr: 0x7ffebc8ba110, cpu: -1, group_fd: -1 ) ... 0.025 ( ): probe:x86_pmu_hw_config:(ffffffff9c0065e1)) x86_pmu_hw_config ([kernel.kallsyms]) hsw_hw_config ([kernel.kallsyms]) x86_pmu_event_init ([kernel.kallsyms]) perf_try_init_event ([kernel.kallsyms]) perf_event_alloc ([kernel.kallsyms]) SYSC_perf_event_open ([kernel.kallsyms]) sys_perf_event_open ([kernel.kallsyms]) do_syscall_64 ([kernel.kallsyms]) return_from_SYSCALL_64 ([kernel.kallsyms]) syscall (/usr/lib64/libc-2.24.so) perf_event_attr__set_max_precise_ip (/home/acme/bin/perf) perf_evsel__new_cycles (/home/acme/bin/perf) perf_evlist__add_default (/home/acme/bin/perf) cmd_record (/home/acme/bin/perf) run_builtin (/home/acme/bin/perf) handle_internal_command (/home/acme/bin/perf) 0.023 ( 0.004 ms): perf/4150 ... [continued]: perf_event_open()) = -1 EINVAL Invalid argument 0.028 ( 0.002 ms): perf/4150 perf_event_open(attr_uptr: 0x7ffebc8ba110, cpu: -1, group_fd: -1 ) ... 0.030 ( ): probe:x86_pmu_hw_config:(ffffffff9c0065e1)) x86_pmu_hw_config ([kernel.kallsyms]) hsw_hw_config ([kernel.kallsyms]) x86_pmu_event_init ([kernel.kallsyms]) perf_try_init_event ([kernel.kallsyms]) perf_event_alloc ([kernel.kallsyms]) SYSC_perf_event_open ([kernel.kallsyms]) sys_perf_event_open ([kernel.kallsyms]) do_syscall_64 ([kernel.kallsyms]) return_from_SYSCALL_64 ([kernel.kallsyms]) syscall (/usr/lib64/libc-2.24.so) perf_event_attr__set_max_precise_ip (/home/acme/bin/perf) perf_evsel__new_cycles (/home/acme/bin/perf) perf_evlist__add_default (/home/acme/bin/perf) cmd_record (/home/acme/bin/perf) run_builtin (/home/acme/bin/perf) handle_internal_command (/home/acme/bin/perf) 0.028 ( 0.004 ms): perf/4150 ... 
[continued]: perf_event_open()) = -1 EINVAL Invalid argument 41.018 ( 0.012 ms): perf/4150 perf_event_open(attr_uptr: 0x7ffebc8b5dd0, pid: -1, group_fd: -1, flags: FD_CLOEXEC) = 4 41.065 ( 0.011 ms): perf/4150 perf_event_open(attr_uptr: 0x3c7db78, pid: -1, group_fd: -1, flags: FD_CLOEXEC) = 4 41.080 ( 0.006 ms): perf/4150 perf_event_open(attr_uptr: 0x3c7db78, pid: -1, group_fd: -1, flags: FD_CLOEXEC) = 4 41.103 ( 0.010 ms): perf/4150 perf_event_open(attr_uptr: 0x3c4e748, pid: 4151 (perf), group_fd: -1, flags: FD_CLOEXEC) = 4 41.115 ( 0.006 ms): perf/4150 perf_event_open(attr_uptr: 0x3c4e748, pid: 4151 (perf), cpu: 1, group_fd: -1, flags: FD_CLOEXEC) = 5 41.122 ( 0.004 ms): perf/4150 perf_event_open(attr_uptr: 0x3c4e748, pid: 4151 (perf), cpu: 2, group_fd: -1, flags: FD_CLOEXEC) = 6 41.128 ( 0.008 ms): perf/4150 perf_event_open(attr_uptr: 0x3c4e748, pid: 4151 (perf), cpu: 3, group_fd: -1, flags: FD_CLOEXEC) = 8 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.017 MB perf.data (2 samples) ] # I.e. that return -EINVAL in x86_pmu_hw_config() is hit three times. So fix it by just setting attr.sample_period Now, after this patch: # perf trace --max-stack=2 -e perf_event_open,probe:x86_pmu_hw_config* perf record usleep 1 [ perf record: Woken up 1 times to write data ] 0.000 ( 0.017 ms): perf/8469 perf_event_open(attr_uptr: 0x7ffe36c27d10, pid: -1, cpu: 3, group_fd: -1, flags: FD_CLOEXEC) = 4 syscall (/usr/lib64/libc-2.24.so) perf_event_open_cloexec_flag (/home/acme/bin/perf) 0.050 ( 0.031 ms): perf/8469 perf_event_open(attr_uptr: 0x24ebb78, pid: -1, group_fd: -1, flags: FD_CLOEXEC) = 4 syscall (/usr/lib64/libc-2.24.so) perf_evlist__config (/home/acme/bin/perf) 0.092 ( 0.040 ms): perf/8469 perf_event_open(attr_uptr: 0x24ebb78, pid: -1, group_fd: -1, flags: FD_CLOEXEC) = 4 syscall (/usr/lib64/libc-2.24.so) perf_evlist__config (/home/acme/bin/perf) 0.143 ( 0.007 ms): perf/8469 perf_event_open(attr_uptr: 0x24bc748, cpu: -1, group_fd: -1 ) = 4 syscall (/usr/lib64/libc-2.24.so) perf_event_attr__set_max_precise_ip (/home/acme/bin/perf) 0.161 ( 0.007 ms): perf/8469 perf_event_open(attr_uptr: 0x24bc748, pid: 8470 (perf), group_fd: -1, flags: FD_CLOEXEC) = 4 syscall (/usr/lib64/libc-2.24.so) perf_evsel__open (/home/acme/bin/perf) 0.171 ( 0.005 ms): perf/8469 perf_event_open(attr_uptr: 0x24bc748, pid: 8470 (perf), cpu: 1, group_fd: -1, flags: FD_CLOEXEC) = 5 syscall (/usr/lib64/libc-2.24.so) perf_evsel__open (/home/acme/bin/perf) 0.180 ( 0.007 ms): perf/8469 perf_event_open(attr_uptr: 0x24bc748, pid: 8470 (perf), cpu: 2, group_fd: -1, flags: FD_CLOEXEC) = 6 syscall (/usr/lib64/libc-2.24.so) perf_evsel__open (/home/acme/bin/perf) 0.190 ( 0.005 ms): perf/8469 perf_event_open(attr_uptr: 0x24bc748, pid: 8470 (perf), cpu: 3, group_fd: -1, flags: FD_CLOEXEC) = 8 syscall (/usr/lib64/libc-2.24.so) perf_evsel__open (/home/acme/bin/perf) [ perf record: Captured and wrote 0.017 MB perf.data (7 samples) ] # The probe one called from perf_event_attr__set_max_precise_ip() works the first time, with attr.precise_ip = 3, wit hthe next ones being the per cpu ones for the cycles:ppp event. And here is the text from a report and alternative proposed patch by Thomas-Mich Richter: --- On s390 the counter and sampling facility do not support a precise IP skid level and sometimes returns EOPNOTSUPP when structure member precise_ip in struct perf_event_attr is not set to zero. On s390 commnd 'perf record -- true' fails with error EOPNOTSUPP. 
This happens only when no events are specified on command line. The functions called are ... --> perf_evlist__add_default --> perf_evsel__new_cycles --> perf_event_attr__set_max_precise_ip The last function determines the value of structure member precise_ip by invoking the perf_event_open() system call and checking the return code. The first successful open is the value for precise_ip. However the value is determined without setting member sample_period and indicates no sampling. On s390 the counter facility and sampling facility are different. The above procedure determines a precise_ip value of 3 using the counter facility. Later it uses the sampling facility with a value of 3 and fails with EOPNOTSUPP. --- v2: Older compilers (e.g. gcc 4.4.7) don't support referencing members of unnamed union members in the container struct initialization, so move from: struct perf_event_attr attr = { ... .sample_period = 1, }; to right after it as: struct perf_event_attr attr = { ... }; attr.sample_period = 1; v3: We need to reset .sample_period to 0 to let the users of perf_evsel__new_cycles() to properly setup attr.sample_period or attr.sample_freq. Reported by Ingo Molnar. Reported-and-Acked-by: Thomas-Mich Richter Acked-by: Hendrik Brueckner Acked-by: Jiri Olsa Cc: Adrian Hunter Cc: Alexander Shishkin Cc: David Ahern Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Fixes: 18e7a45af91a ("perf/x86: Reject non sampling events with precise_ip") Link: http://lkml.kernel.org/n/tip-yv6nnkl7tzqocrm0hl3x7vf1@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/task-exit.c | 2 +- tools/perf/util/evsel.c | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 32873ec91a4e..cf00ebad2ef5 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused) evsel = perf_evlist__first(evlist); evsel->attr.task = 1; - evsel->attr.sample_freq = 0; + evsel->attr.sample_freq = 1; evsel->attr.inherit = 0; evsel->attr.watermark = 0; evsel->attr.wakeup_events = 1; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e4f7902d5afa..cda44b0e821c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void) struct perf_evsel *evsel; event_attr_init(&attr); + /* + * Unnamed union member, not supported as struct member named + * initializer in older compilers such as gcc 4.4.7 + * + * Just for probing the precise_ip: + */ + attr.sample_period = 1; perf_event_attr__set_max_precise_ip(&attr); + /* + * Now let the usual logic to set up the perf_event_attr defaults + * to kick in when we return and before perf_evsel__open() is called. + */ + attr.sample_period = 0; evsel = perf_evsel__new(&attr); if (evsel == NULL) From 7a759cd8e8272ee18922838ee711219c7c796a31 Mon Sep 17 00:00:00 2001 From: Jiada Wang Date: Sun, 9 Apr 2017 20:02:37 -0700 Subject: [PATCH 238/436] perf tools: Fix build with ARCH=x86_64 With commit: 0a943cb10ce78 (tools build: Add HOSTARCH Makefile variable) when building for ARCH=x86_64, ARCH=x86_64 is passed to perf instead of ARCH=x86, so the perf build process searchs header files from tools/arch/x86_64/include, which doesn't exist. The following build failure is seen: In file included from util/event.c:2:0: tools/include/uapi/linux/mman.h:4:27: fatal error: uapi/asm/mman.h: No such file or directory compilation terminated. 
Fix this issue by using SRCARCH instead of ARCH in perf, just like the main kernel Makefile and tools/objtool's. Signed-off-by: Jiada Wang Tested-by: Arnaldo Carvalho de Melo Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Eugeniu Rosca Cc: Jan Stancek Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Rui Teng Cc: Sukadev Bhattiprolu Cc: Wang Nan Fixes: 0a943cb10ce7 ("tools build: Add HOSTARCH Makefile variable") Link: http://lkml.kernel.org/r/1491793357-14977-2-git-send-email-jiada_wang@mentor.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 38 ++++++++++++++++++------------------- tools/perf/Makefile.perf | 2 +- tools/perf/arch/Build | 2 +- tools/perf/pmu-events/Build | 4 ++-- tools/perf/tests/Build | 2 +- tools/perf/util/header.c | 2 +- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 8354d04b392f..1f4fbc9a3292 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS) include $(srctree)/tools/scripts/Makefile.arch -$(call detected_var,ARCH) +$(call detected_var,SRCARCH) NO_PERF_REGS := 1 # Additional ARCH settings for ppc -ifeq ($(ARCH),powerpc) +ifeq ($(SRCARCH),powerpc) NO_PERF_REGS := 0 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 endif # Additional ARCH settings for x86 -ifeq ($(ARCH),x86) +ifeq ($(SRCARCH),x86) $(call detected,CONFIG_X86) ifeq (${IS_64_BIT}, 1) CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated @@ -43,12 +43,12 @@ ifeq ($(ARCH),x86) NO_PERF_REGS := 0 endif -ifeq ($(ARCH),arm) +ifeq ($(SRCARCH),arm) NO_PERF_REGS := 0 LIBUNWIND_LIBS = -lunwind -lunwind-arm endif -ifeq ($(ARCH),arm64) +ifeq ($(SRCARCH),arm64) NO_PERF_REGS := 0 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 endif @@ -61,7 +61,7 @@ endif # Disable it on all other architectures in case libdw unwind # support is detected in system. Add supported architectures # to the check. -ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) +ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm)) NO_LIBDW_DWARF_UNWIND := 1 endif @@ -115,9 +115,9 @@ endif FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf -FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi +FEATURE_CHECK_CFLAGS-bpf = -I. 
-I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi # include ARCH specific config --include $(src-perf)/arch/$(ARCH)/Makefile +-include $(src-perf)/arch/$(SRCARCH)/Makefile ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET @@ -228,12 +228,12 @@ ifeq ($(DEBUG),0) endif INC_FLAGS += -I$(src-perf)/util/include -INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include +INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include INC_FLAGS += -I$(srctree)/tools/include/uapi INC_FLAGS += -I$(srctree)/tools/include/ -INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi -INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ -INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ +INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi +INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/ +INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/ # $(obj-perf) for generated common-cmds.h # $(obj-perf)/util for generated bison/flex headers @@ -355,7 +355,7 @@ ifndef NO_LIBELF ifndef NO_DWARF ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) - msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); + msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled); NO_DWARF := 1 else CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) @@ -380,7 +380,7 @@ ifndef NO_LIBELF CFLAGS += -DHAVE_BPF_PROLOGUE $(call detected,CONFIG_BPF_PROLOGUE) else - msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); + msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset()); endif else msg := $(warning DWARF support is off, BPF prologue is disabled); @@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP endif endif -ifeq ($(ARCH),powerpc) +ifeq ($(SRCARCH),powerpc) ifndef NO_DWARF CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX endif @@ -487,7 +487,7 @@ else endif ifndef NO_LOCAL_LIBUNWIND - ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) + ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64)) $(call feature_check,libunwind-debug-frame) ifneq ($(feature-libunwind-debug-frame), 1) msg := $(warning No debug_frame support found in libunwind); @@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1) NO_PERF_READ_VDSO32 := 1 endif endif - ifneq ($(ARCH), x86) + ifneq ($(SRCARCH), x86) NO_PERF_READ_VDSOX32 := 1 endif ifndef NO_PERF_READ_VDSOX32 @@ -769,7 +769,7 @@ ifdef LIBBABELTRACE endif ifndef NO_AUXTRACE - ifeq ($(ARCH),x86) + ifeq ($(SRCARCH),x86) ifeq ($(feature-get_cpuid), 0) msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); NO_AUXTRACE := 1 @@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc ETC_PERFCONFIG = etc/perfconfig endif ifndef lib -ifeq ($(ARCH)$(IS_64_BIT), x861) +ifeq ($(SRCARCH)$(IS_64_BIT), x861) lib = lib64 else lib = lib diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 79fe31f20a17..5008f51a08a2 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -226,7 +226,7 @@ endif ifeq ($(config),0) include $(srctree)/tools/scripts/Makefile.arch --include arch/$(ARCH)/Makefile +-include arch/$(SRCARCH)/Makefile endif # The FEATURE_DUMP_EXPORT holds location of the actual diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build index 109eb75cf7de..d9b6af837c7d 100644 --- a/tools/perf/arch/Build +++ b/tools/perf/arch/Build @@ -1,2 +1,2 @@ libperf-y += 
common.o -libperf-y += $(ARCH)/ +libperf-y += $(SRCARCH)/ diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build index 9213a1273697..999a4e878162 100644 --- a/tools/perf/pmu-events/Build +++ b/tools/perf/pmu-events/Build @@ -2,7 +2,7 @@ hostprogs := jevents jevents-y += json.o jsmn.o jevents.o pmu-events-y += pmu-events.o -JDIR = pmu-events/arch/$(ARCH) +JDIR = pmu-events/arch/$(SRCARCH) JSON = $(shell [ -d $(JDIR) ] && \ find $(JDIR) -name '*.json' -o -name 'mapfile.csv') # @@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \ # directory and create tables in pmu-events.c. # $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) - $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) + $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index af58ebc243ef..84222bdb8689 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ $(Q)echo ';' >> $@ -ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) +ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o endif diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 5cac8d5e009a..b5baff3007bb 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused, /* * default get_cpuid(): nothing gets recorded - * actual implementation must be in arch/$(ARCH)/util/header.c + * actual implementation must be in arch/$(SRCARCH)/util/header.c */ int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) { From db46a0e1be7eac45d0bb1bdcd438b8d47c920451 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 14 Jun 2017 16:15:24 +0900 Subject: [PATCH 239/436] net: update undefined ->ndo_change_mtu() comment Update ->ndo_change_mtu() callback comment to remove text about returning error in case of undefined callback. This change makes the comment match the existing code behavior. Signed-off-by: Magnus Damm Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 846193dfb0ac..4ed952c17fc7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -914,8 +914,7 @@ struct xfrmdev_ops { * * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); * Called when a user wants to change the Maximum Transfer Unit - * of a device. If not defined, any request to change MTU will - * will return an error. + * of a device. * * void (*ndo_tx_timeout)(struct net_device *dev); * Callback used when the transmitter has not made any progress From ab156afd3eeb68ce7b875ec8d9ff4f64d1427776 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 14 Jun 2017 09:59:05 +0300 Subject: [PATCH 240/436] IB/ipoib: Fix memory leaks for child interfaces priv There is a need to free priv explicitly and not just to release the device, child priv is freed explicitly on remove flow and this patch also includes priv free on error flow in P_key creation and also in add_port. 
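The underlying pattern here is a separately allocated priv wrapping the net device, and both objects must be freed on every exit path; a minimal plain-C sketch with stand-in types (not the driver's real structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct netdev { char name[16]; };
    struct priv   { struct netdev *dev; };

    static struct priv *intf_alloc(void)
    {
        struct priv *priv = calloc(1, sizeof(*priv));

        if (!priv)
            return NULL;
        priv->dev = calloc(1, sizeof(*priv->dev));
        if (!priv->dev) {
            free(priv);             /* error path must undo the first allocation */
            return NULL;
        }
        return priv;
    }

    static void intf_free(struct priv *priv)
    {
        free(priv->dev);            /* analogue of free_netdev()             */
        free(priv);                 /* the wrapper itself also needs freeing */
    }

    int main(void)
    {
        struct priv *priv = intf_alloc();

        if (!priv)
            return 1;
        /* ... use the interface ... */
        intf_free(priv);            /* forgetting free(priv) here is the leak */
        return 0;
    }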
Fixes: cd565b4b51e5 ('IB/IPoIB: Support acceleration options callbacks') Signed-off-by: Alex Vesker Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 7 ++++++- drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 5 ++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index a115c0b7a310..0ddd9709e1df 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2237,6 +2237,7 @@ event_failed: device_init_failed: free_netdev(priv->dev); + kfree(priv); alloc_mem_failed: return ERR_PTR(result); @@ -2277,7 +2278,7 @@ static void ipoib_add_one(struct ib_device *device) static void ipoib_remove_one(struct ib_device *device, void *client_data) { - struct ipoib_dev_priv *priv, *tmp; + struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; struct list_head *dev_list = client_data; if (!dev_list) @@ -2301,6 +2302,10 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) unregister_netdev(priv->dev); free_netdev(priv->dev); + + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) + kfree(cpriv); + kfree(priv); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 36dc4fcaa3cd..1ee46194bbf5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -167,8 +167,10 @@ out: rtnl_unlock(); - if (result) + if (result) { free_netdev(priv->dev); + kfree(priv); + } return result; } @@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) if (dev) { free_netdev(dev); + kfree(priv); return 0; } From 022d038a163f9e889428789d681b97bf07730fb7 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 14 Jun 2017 09:59:06 +0300 Subject: [PATCH 241/436] IB/ipoib: Limit call to free rdma_netdev for capable devices Limit calls to free_rdma_netdev() for capable devices only. 
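A minimal sketch of the capability check itself, using stand-in types and names (only providers that set the hook get it called, everyone else keeps the generic free):

    #include <stdio.h>
    #include <stdlib.h>

    struct netdev { int dummy; };

    struct ibdev {
        /* set only by providers that allocate the netdev themselves */
        void (*free_rdma_netdev)(struct netdev *);
    };

    static void generic_free_netdev(struct netdev *dev)
    {
        printf("generic free\n");
        free(dev);
    }

    static void provider_free(struct netdev *dev)
    {
        printf("provider-specific free\n");
        free(dev);
    }

    static void remove_one(struct ibdev *ibdev, struct netdev *dev)
    {
        if (ibdev->free_rdma_netdev)        /* mirror of the check added above */
            ibdev->free_rdma_netdev(dev);
        else
            generic_free_netdev(dev);
    }

    int main(void)
    {
        struct ibdev plain = { 0 };
        struct ibdev capable = { .free_rdma_netdev = provider_free };

        remove_one(&plain, calloc(1, sizeof(struct netdev)));    /* generic  */
        remove_one(&capable, calloc(1, sizeof(struct netdev)));  /* callback */
        return 0;
    }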
Fixes: cd565b4b51e5 ('IB/IPoIB: Support acceleration options callbacks') Signed-off-by: Alex Vesker Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 6 ++++-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 0c79983c8b1a..9ecc089d4529 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3692,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; - dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; + dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; + } if (mlx5_core_is_pf(mdev)) { dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 0ddd9709e1df..91fae34bdd4f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2301,7 +2301,10 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) flush_workqueue(priv->wq); unregister_netdev(priv->dev); - free_netdev(priv->dev); + if (device->free_rdma_netdev) + device->free_rdma_netdev(priv->dev); + else + free_netdev(priv->dev); list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) kfree(cpriv); From b53d4566cc117d9ab8d20b365ba7c2a519439725 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 14 Jun 2017 09:59:07 +0300 Subject: [PATCH 242/436] IB/ipoib: Delete napi in device uninit default This patch mekas init_default and uninit_default symmetric with a call to delete napi. Additionally, the uninit_default gained delete napi call in case of init_default fails. Fixes: 515ed4f3aab4 ('IB/IPoIB: Separate control and data related initializations') Signed-off-by: Alex Vesker Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 91fae34bdd4f..1015a63de6ae 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev) ipoib_transport_dev_cleanup(dev); + netif_napi_del(&priv->napi); + ipoib_cm_dev_cleanup(dev); kfree(priv->rx_ring); @@ -1649,6 +1651,7 @@ out_rx_ring_cleanup: kfree(priv->rx_ring); out: + netif_napi_del(&priv->napi); return -ENOMEM; } From 560b7c3ffec1ca7d5de250d8ca7b1ca2349e98a4 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 14 Jun 2017 09:59:08 +0300 Subject: [PATCH 243/436] IB/ipoib: Fix access to un-initialized napi struct There is no need to re-enable napi since we set the initialized flag before calling ipoib_ib_dev_stop which will disable napi, disabling napi twice is harmless in case it was already disabled. One more reason for this fix is that when using IPoIB new device driver napi is not added to priv, this can lead to kernel panic when rn_ops ndo_open fails. 
[ 289.755840] invalid opcode: 0000 [#1] SMP [ 289.757111] task: ffff880036964440 ti: ffff880178ee8000 task.ti: ffff880178ee8000 [ 289.757111] RIP: 0010:[] [] napi_enable.part.24+0x4/0x6 [ib_ipoib] [ 289.757111] RSP: 0018:ffff880178eeb6d8 EFLAGS: 00010246 [ 289.757111] RAX: 0000000000000000 RBX: ffff880177a80010 RCX: 000000007fffffff [ 289.757111] RDX: ffffffff81d5f118 RSI: 0000000000000000 RDI: ffff880177a80010 [ 289.757111] RBP: ffff880178eeb6d8 R08: 0000000000000082 R09: 0000000000000283 [ 289.757111] R10: 0000000000000000 R11: 0000000000000000 R12: ffff880175a00000 [ 289.757111] R13: ffff880177a80080 R14: 0000000000000000 R15: 0000000000000001 [ 289.757111] FS: 00007fe2ee346880(0000) GS:ffff88017fc00000(0000) knlGS:0000000000000000 [ 289.757111] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 289.757111] CR2: 00007fffca979020 CR3: 00000001792e4000 CR4: 00000000000006f0 [ 289.757111] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 289.757111] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 289.757111] Stack: [ 289.796027] ffff880178eeb6f0 ffffffffa05251f5 ffff880177a80000 ffff880178eeb718 [ 289.796027] ffffffffa0528505 ffff880175a00000 ffff880177a80000 0000000000000000 [ 289.796027] ffff880178eeb748 ffffffffa051f0ab ffff880175a00000 ffffffffa0537d60 [ 289.796027] Call Trace: [ 289.796027] [] napi_enable+0x25/0x30 [ib_ipoib] [ 289.796027] [] ipoib_ib_dev_open+0x175/0x190 [ib_ipoib] [ 289.796027] [] ipoib_open+0x4b/0x160 [ib_ipoib] [ 289.796027] [] _dev_open+0xbf/0x130 [ 289.796027] [] __dev_change_flags+0x9d/0x170 [ 289.796027] [] dev_change_flags+0x29/0x60 [ 289.796027] [] do_setlink+0x397/0xa40 Fixes: cd565b4b51e5 ('IB/IPoIB: Support acceleration options callbacks') Signed-off-by: Alex Vesker Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0060b2f9f659..efe7402f4885 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -863,7 +863,6 @@ dev_stop: set_bit(IPOIB_STOP_REAPER, &priv->flags); cancel_delayed_work(&priv->ah_reap_task); set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - napi_enable(&priv->napi); ipoib_ib_dev_stop(dev); return -1; } From 4542d66bb26f2d021c70a78e46f183c6675fc4c9 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Wed, 14 Jun 2017 09:59:09 +0300 Subject: [PATCH 244/436] IB/ipoib: Fix memory leak in create child syscall The flow of creating a new child goes through ipoib_vlan_add which allocates a new interface and checks the rtnl_lock. If the lock is taken, restart_syscall will be called to restart the system call again. In this case we are not releasing the already allocated interface, causing a leak. 
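A user-space analogue of the reordering, with a pthread trylock standing in for rtnl_trylock() and -EAGAIN standing in for restart_syscall() (illustrative only; build with -pthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for rtnl */

    struct child { int pkey; };

    /* take the lock before allocating, so the retry path cannot leak memory */
    static int vlan_add(int pkey, struct child **out)
    {
        struct child *priv;

        if (pthread_mutex_trylock(&big_lock))
            return -EAGAIN;                 /* analogue of restart_syscall() */

        priv = calloc(1, sizeof(*priv));
        if (!priv) {
            pthread_mutex_unlock(&big_lock);
            return -ENOMEM;
        }
        priv->pkey = pkey;

        pthread_mutex_unlock(&big_lock);
        *out = priv;
        return 0;
    }

    int main(void)
    {
        struct child *c = NULL;

        if (vlan_add(0x8001, &c) == 0) {
            printf("added child pkey 0x%x\n", c->pkey);
            free(c);
        }
        return 0;
    }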
Fixes: 9baa0b036410 ("IB/ipoib: Add rtnl_link_ops support") Signed-off-by: Feras Daoud Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 1ee46194bbf5..081b33deff1b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) snprintf(intf_name, sizeof intf_name, "%s.%04x", ppriv->dev->name, pkey); + if (!rtnl_trylock()) + return restart_syscall(); + priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); if (!priv) return -ENOMEM; - if (!rtnl_trylock()) - return restart_syscall(); - down_write(&ppriv->vlan_rwsem); /* From 96ecff14225ad40a29f4d5cfa6bd9266c8e1e89a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 14 Jun 2017 15:17:32 -0400 Subject: [PATCH 245/436] ufs: fix logics in "ufs: make fsck -f happy" Storing stats _only_ at new locations is wrong for UFS1; old locations should always be kept updated. The check for "has been converted to use of new locations" is also wrong - it should be "->fs_maxbsize is equal to ->fs_bsize". Signed-off-by: Al Viro --- fs/ufs/super.c | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/fs/ufs/super.c b/fs/ufs/super.c index d9aa2627c9df..eca838a8b43e 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb) usb3 = ubh_get_usb_third(uspi); if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && - (usb1->fs_flags & UFS_FLAGS_UPDATED)) || + (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) || mtype == UFS_MOUNT_UFSTYPE_UFS2) { /*we have statistic in different place, then usual*/ uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir); @@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb) usb2 = ubh_get_usb_second(uspi); usb3 = ubh_get_usb_third(uspi); - if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && - (usb1->fs_flags & UFS_FLAGS_UPDATED)) || - mtype == UFS_MOUNT_UFSTYPE_UFS2) { + if (mtype == UFS_MOUNT_UFSTYPE_UFS2) { /*we have statistic in different place, then usual*/ usb2->fs_un.fs_u2.cs_ndir = cpu_to_fs64(sb, uspi->cs_total.cs_ndir); @@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb) cpu_to_fs64(sb, uspi->cs_total.cs_nifree); usb3->fs_un1.fs_u2.cs_nffree = cpu_to_fs64(sb, uspi->cs_total.cs_nffree); - } else { - usb1->fs_cstotal.cs_ndir = - cpu_to_fs32(sb, uspi->cs_total.cs_ndir); - usb1->fs_cstotal.cs_nbfree = - cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); - usb1->fs_cstotal.cs_nifree = - cpu_to_fs32(sb, uspi->cs_total.cs_nifree); - usb1->fs_cstotal.cs_nffree = - cpu_to_fs32(sb, uspi->cs_total.cs_nffree); + goto out; } + + if (mtype == UFS_MOUNT_UFSTYPE_44BSD && + (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) { + /* store stats in both old and new places */ + usb2->fs_un.fs_u2.cs_ndir = + cpu_to_fs64(sb, uspi->cs_total.cs_ndir); + usb2->fs_un.fs_u2.cs_nbfree = + cpu_to_fs64(sb, uspi->cs_total.cs_nbfree); + usb3->fs_un1.fs_u2.cs_nifree = + cpu_to_fs64(sb, uspi->cs_total.cs_nifree); + usb3->fs_un1.fs_u2.cs_nffree = + cpu_to_fs64(sb, uspi->cs_total.cs_nffree); + } + usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir); + usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); + usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, 
uspi->cs_total.cs_nifree); + usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree); +out: ubh_mark_buffer_dirty(USPI_UBH(uspi)); ufs_print_super_stuff(sb, usb1, usb2, usb3); UFSD("EXIT\n"); @@ -997,6 +1005,13 @@ again: flags |= UFS_ST_SUN; } + if ((flags & UFS_ST_MASK) == UFS_ST_44BSD && + uspi->s_postblformat == UFS_42POSTBLFMT) { + if (!silent) + pr_err("this is not a 44bsd filesystem"); + goto failed; + } + /* * Check ufs magic number */ From c4f65b09b459c6f0ec27b1a1a65302f7fea5c96f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Jun 2017 13:29:31 +0300 Subject: [PATCH 246/436] net/act_pedit: fix an error code I'm reviewing static checker warnings where we do ERR_PTR(0), which is the same as NULL. I'm pretty sure we intended to return ERR_PTR(-EINVAL) here. Sometimes these bugs lead to a NULL dereference but I don't immediately see that problem here. Fixes: 71d0ed7079df ("net/act_pedit: Support using offset relative to the conventional network headers") Signed-off-by: Dan Carpenter Acked-by: Amir Vadai Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 164b5ac094be..7dc5892671c8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, k++; } - if (n) + if (n) { + err = -EINVAL; goto err_out; + } return keys_ex; From d4702645838c8e04893383b50406249382b4e6bf Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Fri, 9 Jun 2017 22:17:49 +0530 Subject: [PATCH 247/436] rdma/cxgb4: Fix memory leaks during module exit Fix memory leaks of iw_cxgb4 module in the exit path Signed-off-by: Raju Rangoju Reviewed-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb4/device.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index f96a96dbcf1f..ae0b79aeea2e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, kfree(entry); } - list_for_each_safe(pos, nxt, &uctx->qpids) { + list_for_each_safe(pos, nxt, &uctx->cqids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); @@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); if (!rdev->free_workq) { err = -ENOMEM; - goto err_free_status_page; + goto err_free_status_page_and_wr_log; } rdev->status_page->db_off = 0; return 0; -err_free_status_page: +err_free_status_page_and_wr_log: + if (c4iw_wr_log && rdev->wr_log) + kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); @@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) { destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); + c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + c4iw_ocqp_pool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); } From dc9edc44de6cd7cc8cc7f5b36c1adb221eda3207 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 14 Jun 2017 13:27:50 -0600 Subject: [PATCH 248/436] block: Fix a blk_exit_rl() regression Avoid that the following complaint is reported: BUG: sleeping function called 
from invalid context at kernel/workqueue.c:2790 in_atomic(): 1, irqs_disabled(): 0, pid: 41, name: rcuop/3 1 lock held by rcuop/3/41: #0: (rcu_callback){......}, at: [] rcu_nocb_kthread+0x282/0x500 Call Trace: dump_stack+0x86/0xcf ___might_sleep+0x174/0x260 __might_sleep+0x4a/0x80 flush_work+0x7e/0x2e0 __cancel_work_timer+0x143/0x1c0 cancel_work_sync+0x10/0x20 blk_throtl_exit+0x25/0x60 blkcg_exit_queue+0x35/0x40 blk_release_queue+0x42/0x130 kobject_put+0xa9/0x190 This happens since we invoke callbacks that need to block from the queue release handler. Fix this by pushing the final release to a workqueue. Reported-by: Ross Zwisler Fixes: commit b425e5049258 ("block: Avoid that blk_exit_rl() triggers a use-after-free") Signed-off-by: Bart Van Assche Tested-by: Ross Zwisler Updated changelog Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 34 ++++++++++++++++++++++------------ include/linux/blkdev.h | 2 ++ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 283da7fbe034..27aceab1cc31 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) } /** - * blk_release_queue: - release a &struct request_queue when it is no longer needed - * @kobj: the kobj belonging to the request queue to be released + * __blk_release_queue - release a request queue when it is no longer needed + * @work: pointer to the release_work member of the request queue to be released * * Description: - * blk_release_queue is the pair to blk_init_queue() or - * blk_queue_make_request(). It should be called when a request queue is - * being released; typically when a block device is being de-registered. - * Currently, its primary task it to free all the &struct request - * structures that were allocated to the queue and the queue itself. + * blk_release_queue is the counterpart of blk_init_queue(). It should be + * called when a request queue is being released; typically when a block + * device is being de-registered. Its primary task it to free the queue + * itself. * - * Note: + * Notes: * The low level driver must have finished any outstanding requests first * via blk_cleanup_queue(). - **/ -static void blk_release_queue(struct kobject *kobj) + * + * Although blk_release_queue() may be called with preemption disabled, + * __blk_release_queue() may sleep. 
+ */ +static void __blk_release_queue(struct work_struct *work) { - struct request_queue *q = - container_of(kobj, struct request_queue, kobj); + struct request_queue *q = container_of(work, typeof(*q), release_work); if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) blk_stat_remove_callback(q, q->poll_cb); @@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj) call_rcu(&q->rcu_head, blk_free_queue_rcu); } +static void blk_release_queue(struct kobject *kobj) +{ + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + + INIT_WORK(&q->release_work, __blk_release_queue); + schedule_work(&q->release_work); +} + static const struct sysfs_ops queue_sysfs_ops = { .show = queue_attr_show, .store = queue_attr_store, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ab92c4ea138b..b74a3edcb3da 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -586,6 +586,8 @@ struct request_queue { size_t cmd_size; void *rq_alloc_data; + + struct work_struct release_work; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ From 849a44de91636c24cea799cb8ad8c36433feb913 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 14 Jun 2017 13:27:37 +0200 Subject: [PATCH 249/436] net: don't global ICMP rate limit packets originating from loopback Florian Weimer seems to have a glibc test-case which requires that loopback interfaces does not get ICMP ratelimited. This was broken by commit c0303efeab73 ("net: reduce cycles spend on ICMP replies that gets rate limited"). An ICMP response will usually be routed back-out the same incoming interface. Thus, take advantage of this and skip global ICMP ratelimit when the incoming device is loopback. In the unlikely event that the outgoing it not loopback, due to strange routing policy rules, ICMP rate limiting still works via peer ratelimiting via icmpv4_xrlim_allow(). Thus, we should still comply with RFC1812 (section 4.3.2.8 "Rate Limiting"). This seems to fix the reproducer given by Florian. While still avoiding to perform expensive and unneeded outgoing route lookup for rate limited packets (in the non-loopback case). Fixes: c0303efeab73 ("net: reduce cycles spend on ICMP replies that gets rate limited") Reported-by: Florian Weimer Reported-by: "H.J. Lu" Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- net/ipv4/icmp.c | 8 ++++++-- net/ipv6/icmp.c | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 43318b5f5647..9144fa7df2ad 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) /* Needed by both icmp_global_allow and icmp_xmit_lock */ local_bh_disable(); - /* Check global sysctl_icmp_msgs_per_sec ratelimit */ - if (!icmpv4_global_allow(net, type, code)) + /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless + * incoming dev is loopback. 
If outgoing dev change to not be + * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow) + */ + if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && + !icmpv4_global_allow(net, type, code)) goto out_bh_enable; sk = icmp_xmit_lock(net); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 230b5aac9f03..8d7b113958b1 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, local_bh_disable(); /* Check global sysctl_icmp_msgs_per_sec ratelimit */ - if (!icmpv6_global_allow(type)) + if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type)) goto out_bh_enable; mip6_addr_swap(skb); From fffd70f58864f5a48b2c17d02730a460f86d4254 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 14 Jun 2017 15:36:31 -0400 Subject: [PATCH 250/436] ufs: make ufs_freespace() return signed as it is, checking that its return value is <= 0 is useless and that's how it's being used. Signed-off-by: Al Viro --- fs/ufs/util.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 398019fb1448..1e1639f8a58b 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -354,12 +354,12 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi, * Determine the number of available frags given a * percentage to hold in reserve. */ -static inline u64 +static inline s64 ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved) { return ufs_blkstofrags(uspi->cs_total.cs_nbfree) + uspi->cs_total.cs_nffree - - (uspi->s_dsize * (percentreserved) / 100); + (uspi->s_dsize * percentreserved) / 100; } /* From b451cec4bbd913688f5381efad407762a64a92ce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 14 Jun 2017 15:41:17 -0400 Subject: [PATCH 251/436] ufs: fix reserved blocks check a) honour ->s_minfree; don't just go with default (5) b) don't bother with capability checks until we know we'll need them Signed-off-by: Al Viro --- fs/ufs/balloc.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index d642cc0a8271..52d1ef415f6f 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -400,10 +400,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, /* * There is not enough space for user on the device */ - if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { - mutex_unlock(&UFS_SB(sb)->s_lock); - UFSD("EXIT (FAILED)\n"); - return 0; + if (unlikely(ufs_freespace(uspi, uspi->s_minfree) <= 0)) { + if (!capable(CAP_SYS_RESOURCE)) { + mutex_unlock(&UFS_SB(sb)->s_lock); + UFSD("EXIT (FAILED)\n"); + return 0; + } } if (goal >= uspi->s_size) From c596961d1b4ccc6f15754fe5a49c37ac6da57145 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 14 Jun 2017 16:36:29 -0400 Subject: [PATCH 252/436] ufs: fix s_size/s_dsize users For UFS2 we need 64bit variants; we even store them in uspi, but use 32bit ones instead. One wrinkle is in handling of reserved space - recalculating it every time had been stupid all along, but now it would become really ugly. Just calculate it once... 
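As an aside, the one-time reservation being cached here is simple enough to sketch in isolation. The sketch below is illustrative only: root_reserved_frags() and has_room_for_unprivileged() are made-up names, and the GCC/Clang unsigned __int128 type stands in for the kernel's mul_u64_u32_div() helper (the one the hunk below actually uses), which likewise keeps the intermediate product from wrapping before the division by 100.

    #include <stdint.h>

    /* One-time computation of the root-reserved fragment count.
     * dsize = number of data fragments, minfree = reserve in percent. */
    static uint64_t root_reserved_frags(uint64_t dsize, uint32_t minfree)
    {
            /* wide intermediate, same guarantee as mul_u64_u32_div() */
            return (uint64_t)(((unsigned __int128)dsize * minfree) / 100);
    }

    /* The allocation path then compares against the cached value instead of
     * redoing the percentage math on every call. */
    static int has_room_for_unprivileged(uint64_t free_frags, uint64_t reserved)
    {
            return free_frags > reserved;
    }

Computed once at mount time, the check in the allocator reduces to a single unsigned comparison, which is what the balloc.c hunk below switches to.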
Signed-off-by: Al Viro --- fs/ufs/balloc.c | 2 +- fs/ufs/super.c | 23 ++++++++++++----------- fs/ufs/ufs_fs.h | 7 +++---- fs/ufs/util.h | 11 +++-------- 4 files changed, 19 insertions(+), 24 deletions(-) diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 52d1ef415f6f..af0473a851af 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -400,7 +400,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, /* * There is not enough space for user on the device */ - if (unlikely(ufs_freespace(uspi, uspi->s_minfree) <= 0)) { + if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) { if (!capable(CAP_SYS_RESOURCE)) { mutex_unlock(&UFS_SB(sb)->s_lock); UFSD("EXIT (FAILED)\n"); diff --git a/fs/ufs/super.c b/fs/ufs/super.c index eca838a8b43e..34656c7a8e22 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1159,8 +1159,8 @@ magic_found: uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { - uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); - uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); + uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); + uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); } else { uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); @@ -1209,6 +1209,9 @@ magic_found: uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff); uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff); + uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, + uspi->s_minfree, 100); + /* * Compute another frequently used values */ @@ -1398,19 +1401,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf) mutex_lock(&UFS_SB(sb)->s_lock); usb3 = ubh_get_usb_third(uspi); - if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { + if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) buf->f_type = UFS2_MAGIC; - buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); - } else { + else buf->f_type = UFS_MAGIC; - buf->f_blocks = uspi->s_dsize; - } - buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) + - uspi->cs_total.cs_nffree; + + buf->f_blocks = uspi->s_dsize; + buf->f_bfree = ufs_freefrags(uspi); buf->f_ffree = uspi->cs_total.cs_nifree; buf->f_bsize = sb->s_blocksize; - buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree)) - ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0; + buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks) + ? 
(buf->f_bfree - uspi->s_root_blocks) : 0; buf->f_files = uspi->s_ncg * uspi->s_ipg; buf->f_namelen = UFS_MAXNAMLEN; buf->f_fsid.val[0] = (u32)id; diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h index 0cbd5d340b67..823d55a37586 100644 --- a/fs/ufs/ufs_fs.h +++ b/fs/ufs/ufs_fs.h @@ -733,10 +733,8 @@ struct ufs_sb_private_info { __u32 s_dblkno; /* offset of first data after cg */ __u32 s_cgoffset; /* cylinder group offset in cylinder */ __u32 s_cgmask; /* used to calc mod fs_ntrak */ - __u32 s_size; /* number of blocks (fragments) in fs */ - __u32 s_dsize; /* number of data blocks in fs */ - __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */ - __u64 s_u2_dsize; /*ufs2: number of data blocks in fs */ + __u64 s_size; /* number of blocks (fragments) in fs */ + __u64 s_dsize; /* number of data blocks in fs */ __u32 s_ncg; /* number of cylinder groups */ __u32 s_bsize; /* size of basic blocks */ __u32 s_fsize; /* size of fragments */ @@ -793,6 +791,7 @@ struct ufs_sb_private_info { __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */ __s32 fs_magic; /* filesystem magic */ unsigned int s_dirblksize; + __u64 s_root_blocks; }; /* diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 1e1639f8a58b..9fc7119a1551 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi, #define ubh_blkmap(ubh,begin,bit) \ ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb))) -/* - * Determine the number of available frags given a - * percentage to hold in reserve. - */ -static inline s64 -ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved) +static inline u64 +ufs_freefrags(struct ufs_sb_private_info *uspi) { return ufs_blkstofrags(uspi->cs_total.cs_nbfree) + - uspi->cs_total.cs_nffree - - (uspi->s_dsize * percentreserved) / 100; + uspi->cs_total.cs_nffree; } /* From 267309f394bf3cd8db001992890b1fa52b97974e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 14 Jun 2017 23:32:19 -0400 Subject: [PATCH 253/436] ufs_get_locked_page(): make sure we have buffer_heads callers rely upon that, but find_lock_page() racing with attempt of page eviction by memory pressure might have left us with * try_to_free_buffers() successfully done * __remove_mapping() failed, leaving the page in our mapping * find_lock_page() returning an uptodate page with no buffer_heads attached. 
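The defensive step the hunk below adds is worth stating in isolation: once the page lock is held, attach empty buffers if try_to_free_buffers() won the race. A minimal sketch covering only the find_lock_page() path follows; lock_page_with_buffers() is an illustrative name, not an existing helper.

    /* Sketch: make sure a locked page-cache page carries buffer_heads before
     * returning it to callers that will dereference them. */
    static struct page *lock_page_with_buffers(struct address_space *mapping,
                                               pgoff_t index)
    {
            struct inode *inode = mapping->host;
            struct page *page = find_lock_page(mapping, index);

            if (page && !page_has_buffers(page))
                    create_empty_buffers(page, 1 << inode->i_blkbits, 0);

            return page;    /* NULL if the page was never in the cache */
    }

The real function also has the read_mapping_page() fallback and the truncate check, which the sketch leaves out.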
Signed-off-by: Al Viro --- fs/ufs/util.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/fs/ufs/util.c b/fs/ufs/util.c index f41ad0a6106f..02497a492eb2 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c @@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev struct page *ufs_get_locked_page(struct address_space *mapping, pgoff_t index) { - struct page *page; - - page = find_lock_page(mapping, index); + struct inode *inode = mapping->host; + struct page *page = find_lock_page(mapping, index); if (!page) { page = read_mapping_page(mapping, index, NULL); @@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping, printk(KERN_ERR "ufs_change_blocknr: " "read_mapping_page error: ino %lu, index: %lu\n", mapping->host->i_ino, index); - goto out; + return page; } lock_page(page); @@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping, /* Truncate got there first */ unlock_page(page); put_page(page); - page = NULL; - goto out; + return NULL; } if (!PageUptodate(page) || PageError(page)) { @@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping, printk(KERN_ERR "ufs_change_blocknr: " "can not read page: ino %lu, index: %lu\n", - mapping->host->i_ino, index); + inode->i_ino, index); - page = ERR_PTR(-EIO); + return ERR_PTR(-EIO); } } -out: + if (!page_has_buffers(page)) + create_empty_buffers(page, 1 << inode->i_blkbits, 0); return page; } From 640f93cc6ea7327588be3cc0849d1342aac0393a Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 14 Jun 2017 16:35:31 -0700 Subject: [PATCH 254/436] i40e: Fix a sleep-in-atomic bug The driver may sleep under a spin lock, and the function call path is: i40e_ndo_set_vf_port_vlan (acquire the lock by spin_lock_bh) i40e_vsi_remove_pvid i40e_vlan_stripping_disable i40e_aq_update_vsi_params i40e_asq_send_command mutex_lock --> may sleep To fixed it, the spin lock is released before "i40e_vsi_remove_pvid", and the lock is acquired again after this function. Signed-off-by: Jia-Ju Bai Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 95c23fbaa211..0fb38ca78900 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, VLAN_VID_MASK)); } + spin_unlock_bh(&vsi->mac_filter_hash_lock); if (vlan_id || qos) ret = i40e_vsi_add_pvid(vsi, vlanprio); else i40e_vsi_remove_pvid(vsi); + spin_lock_bh(&vsi->mac_filter_hash_lock); if (vlan_id) { dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", From 09bf4f5b6e6013f0ad6b090d4a8deebd4e56d878 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Jun 2017 00:17:30 -0400 Subject: [PATCH 255/436] ufs: avoid grabbing ->truncate_mutex if possible tail unpacking is done in a wrong place; the deadlocks galore is best dealt with by doing that in ->write_iter() (and switching to iomap, while we are at it), but that's rather painful to backport. 
The trouble comes from grabbing pages that cover the beginning of tail from inside of ufs_new_fragments(); ongoing pageout of any of those is going to deadlock on ->truncate_mutex with process that got around to extending the tail holding that and waiting for page to get unlocked, while ->writepage() on that page is waiting on ->truncate_mutex. The thing is, we don't need ->truncate_mutex when the fragment we are trying to map is within the tail - the damn thing is allocated (tail can't contain holes). Let's do a plain lookup and if the fragment is present, we can just pretend that we'd won the race in almost all cases. The only exception is a fragment between the end of tail and the end of block containing tail. Protect ->i_lastfrag with ->meta_lock - read_seqlock_excl() is sufficient. Signed-off-by: Al Viro --- fs/ufs/balloc.c | 10 ++++++---- fs/ufs/inode.c | 26 ++++++++++++++++++++------ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index af0473a851af..d56d9bc705fe 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -423,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, if (result) { ufs_clear_frags(inode, result + oldcount, newcount - oldcount, locked_page != NULL); + *err = 0; write_seqlock(&UFS_I(inode)->meta_lock); ufs_cpu_to_data_ptr(sb, p, result); - write_sequnlock(&UFS_I(inode)->meta_lock); - *err = 0; UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, fragment + count); + write_sequnlock(&UFS_I(inode)->meta_lock); } mutex_unlock(&UFS_SB(sb)->s_lock); UFSD("EXIT, result %llu\n", (unsigned long long)result); @@ -441,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, result = ufs_add_fragments(inode, tmp, oldcount, newcount); if (result) { *err = 0; + read_seqlock_excl(&UFS_I(inode)->meta_lock); UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, fragment + count); + read_sequnlock_excl(&UFS_I(inode)->meta_lock); ufs_clear_frags(inode, result + oldcount, newcount - oldcount, locked_page != NULL); mutex_unlock(&UFS_SB(sb)->s_lock); @@ -479,12 +481,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, ufs_change_blocknr(inode, fragment - oldcount, oldcount, uspi->s_sbbase + tmp, uspi->s_sbbase + result, locked_page); + *err = 0; write_seqlock(&UFS_I(inode)->meta_lock); ufs_cpu_to_data_ptr(sb, p, result); - write_sequnlock(&UFS_I(inode)->meta_lock); - *err = 0; UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, fragment + count); + write_sequnlock(&UFS_I(inode)->meta_lock); mutex_unlock(&UFS_SB(sb)->s_lock); if (newcount < request) ufs_free_fragments (inode, result + newcount, request - newcount); diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index da553ffec85b..1dda6c4875f9 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -401,13 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff u64 phys64 = 0; unsigned frag = fragment & uspi->s_fpbmask; - if (!create) { - phys64 = ufs_frag_map(inode, offsets, depth); - if (phys64) - map_bh(bh_result, sb, phys64 + frag); - return 0; - } + phys64 = ufs_frag_map(inode, offsets, depth); + if (!create) + goto done; + if (phys64) { + if (fragment >= UFS_NDIR_FRAGMENT) + goto done; + read_seqlock_excl(&UFS_I(inode)->meta_lock); + if (fragment < UFS_I(inode)->i_lastfrag) { + read_sequnlock_excl(&UFS_I(inode)->meta_lock); + goto done; + } + read_sequnlock_excl(&UFS_I(inode)->meta_lock); + } /* This code entered only while writing ....? 
*/ mutex_lock(&UFS_I(inode)->truncate_mutex); @@ -451,6 +458,11 @@ out: } mutex_unlock(&UFS_I(inode)->truncate_mutex); return err; + +done: + if (phys64) + map_bh(bh_result, sb, phys64 + frag); + return 0; } static int ufs_writepage(struct page *page, struct writeback_control *wbc) @@ -1161,7 +1173,9 @@ static void ufs_truncate_blocks(struct inode *inode) free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); } } + read_seqlock_excl(&ufsi->meta_lock); ufsi->i_lastfrag = DIRECT_FRAGMENT; + read_sequnlock_excl(&ufsi->meta_lock); mark_inode_dirty(inode); mutex_unlock(&ufsi->truncate_mutex); } From 289dec5b895a7ecefb2f49da109e6aed9b0f1754 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Jun 2017 00:42:56 -0400 Subject: [PATCH 256/436] ufs: more deadlock prevention on tail unpacking ->s_lock is not needed for ufs_change_blocknr() Signed-off-by: Al Viro --- fs/ufs/balloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index d56d9bc705fe..0315fea1d589 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -478,6 +478,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, if (result) { ufs_clear_frags(inode, result + oldcount, newcount - oldcount, locked_page != NULL); + mutex_unlock(&UFS_SB(sb)->s_lock); ufs_change_blocknr(inode, fragment - oldcount, oldcount, uspi->s_sbbase + tmp, uspi->s_sbbase + result, locked_page); @@ -487,7 +488,6 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, fragment + count); write_sequnlock(&UFS_I(inode)->meta_lock); - mutex_unlock(&UFS_SB(sb)->s_lock); if (newcount < request) ufs_free_fragments (inode, result + newcount, request - newcount); ufs_free_fragments (inode, tmp, oldcount); From 4c3bb4ccd074e1a0552078c0bf94c662367a1658 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 15 Jun 2017 15:43:17 +1000 Subject: [PATCH 257/436] KVM: PPC: Book3S HV: Restore critical SPRs to host values on guest exit This restores several special-purpose registers (SPRs) to sane values on guest exit that were missed before. TAR and VRSAVE are readable and writable by userspace, and we need to save and restore them to prevent the guest from potentially affecting userspace execution (not that TAR or VRSAVE are used by any known program that run uses the KVM_RUN ioctl). We save/restore these in kvmppc_vcpu_run_hv() rather than on every guest entry/exit. FSCR affects userspace execution in that it can prohibit access to certain facilities by userspace. We restore it to the normal value for the task on exit from the KVM_RUN ioctl. IAMR is normally 0, and is restored to 0 on guest exit. However, with a radix host on POWER9, it is set to a value that prevents the kernel from executing user-accessible memory. On POWER9, we save IAMR on guest entry and restore it on guest exit to the saved value rather than 0. On POWER8 we continue to set it to 0 on guest exit. PSPB is normally 0. We restore it to 0 on guest exit to prevent userspace taking advantage of the guest having set it non-zero (which would allow userspace to set its SMT priority to high). UAMOR is normally 0. We restore it to 0 on guest exit to prevent the AMR from being used as a covert channel between userspace processes, since the AMR is not context-switched at present. 
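A rough sketch of the save/run/restore bracket being extended here, for orientation only; run_guest_once() is a stand-in name, and error handling plus the POWER9/POWER8 feature checks are omitted.

    /* Illustrative only: preserve userspace-visible SPRs around guest entry. */
    static void bracket_guest_run(struct kvm_vcpu *vcpu)
    {
            unsigned long user_tar = mfspr(SPRN_TAR);
            unsigned long user_vrsave = mfspr(SPRN_VRSAVE);

            run_guest_once(vcpu);                   /* guest may clobber these SPRs */

            mtspr(SPRN_TAR, user_tar);              /* restore what userspace had */
            mtspr(SPRN_VRSAVE, user_vrsave);
            mtspr(SPRN_FSCR, current->thread.fscr); /* FSCR back to the task value */
    }

The actual change folds this into kvmppc_vcpu_run_hv() and the real-mode exit path, as the hunks below show.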
Fixes: b005255e12a3 ("KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs", 2014-01-08) Cc: stable@vger.kernel.org # v3.14+ Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 11 +++++++++-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 9 ++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 400a5992b121..a963762a031f 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2908,6 +2908,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) int r; int srcu_idx; unsigned long ebb_regs[3] = {}; /* shut up GCC */ + unsigned long user_tar = 0; + unsigned int user_vrsave; if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -2935,12 +2937,14 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) flush_all_to_thread(current); - /* Save userspace EBB register values */ + /* Save userspace EBB and other register values */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { ebb_regs[0] = mfspr(SPRN_EBBHR); ebb_regs[1] = mfspr(SPRN_EBBRR); ebb_regs[2] = mfspr(SPRN_BESCR); + user_tar = mfspr(SPRN_TAR); } + user_vrsave = mfspr(SPRN_VRSAVE); vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; @@ -2968,12 +2972,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) } } while (is_kvmppc_resume_guest(r)); - /* Restore userspace EBB register values */ + /* Restore userspace EBB and other register values */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { mtspr(SPRN_EBBHR, ebb_regs[0]); mtspr(SPRN_EBBRR, ebb_regs[1]); mtspr(SPRN_BESCR, ebb_regs[2]); + mtspr(SPRN_TAR, user_tar); + mtspr(SPRN_FSCR, current->thread.fscr); } + mtspr(SPRN_VRSAVE, user_vrsave); out: vcpu->arch.state = KVMPPC_VCPU_NOTREADY; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index e390b383b4d6..4e4390564276 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -558,6 +558,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) #define STACK_SLOT_TID (112-16) #define STACK_SLOT_PSSCR (112-24) #define STACK_SLOT_PID (112-32) +#define STACK_SLOT_IAMR (112-40) .global kvmppc_hv_entry kvmppc_hv_entry: @@ -758,9 +759,11 @@ BEGIN_FTR_SECTION mfspr r5, SPRN_TIDR mfspr r6, SPRN_PSSCR mfspr r7, SPRN_PID + mfspr r8, SPRN_IAMR std r5, STACK_SLOT_TID(r1) std r6, STACK_SLOT_PSSCR(r1) std r7, STACK_SLOT_PID(r1) + std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) BEGIN_FTR_SECTION @@ -1515,11 +1518,12 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) * set by the guest could disrupt the host. 
*/ li r0, 0 - mtspr SPRN_IAMR, r0 mtspr SPRN_CIABR, r0 mtspr SPRN_DAWRX, r0 + mtspr SPRN_PSPB, r0 mtspr SPRN_WORT, r0 BEGIN_FTR_SECTION + mtspr SPRN_IAMR, r0 mtspr SPRN_TCSCR, r0 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ li r0, 1 @@ -1535,6 +1539,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) std r6,VCPU_UAMOR(r9) li r6,0 mtspr SPRN_AMR,r6 + mtspr SPRN_UAMOR, r6 /* Switch DSCR back to host value */ mfspr r8, SPRN_DSCR @@ -1683,9 +1688,11 @@ BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) ld r6, STACK_SLOT_PSSCR(r1) ld r7, STACK_SLOT_PID(r1) + ld r8, STACK_SLOT_IAMR(r1) mtspr SPRN_TIDR, r5 mtspr SPRN_PSSCR, r6 mtspr SPRN_PID, r7 + mtspr SPRN_IAMR, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) BEGIN_FTR_SECTION PPC_INVALIDATE_ERAT From 46a704f8409f79fd66567ad3f8a7304830a84293 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 15 Jun 2017 16:10:27 +1000 Subject: [PATCH 258/436] KVM: PPC: Book3S HV: Preserve userspace HTM state properly If userspace attempts to call the KVM_RUN ioctl when it has hardware transactional memory (HTM) enabled, the values that it has put in the HTM-related SPRs TFHAR, TFIAR and TEXASR will get overwritten by guest values. To fix this, we detect this condition and save those SPR values in the thread struct, and disable HTM for the task. If userspace goes to access those SPRs or the HTM facility in future, a TM-unavailable interrupt will occur and the handler will reload those SPRs and re-enable HTM. If userspace has started a transaction and suspended it, we would currently lose the transactional state in the guest entry path and would almost certainly get a "TM Bad Thing" interrupt, which would cause the host to crash. To avoid this, we detect this case and return from the KVM_RUN ioctl with an EINVAL error, with the KVM exit reason set to KVM_EXIT_FAIL_ENTRY. Fixes: b005255e12a3 ("KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs", 2014-01-08) Cc: stable@vger.kernel.org # v3.14+ Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a963762a031f..fd4d978d5257 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2916,6 +2916,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) return -EINVAL; } + /* + * Don't allow entry with a suspended transaction, because + * the guest entry/exit code will lose it. + * If the guest has TM enabled, save away their TM-related SPRs + * (they will get restored by the TM unavailable interrupt). 
+ */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && + (current->thread.regs->msr & MSR_TM)) { + if (MSR_TM_ACTIVE(current->thread.regs->msr)) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry.hardware_entry_failure_reason = 0; + return -EINVAL; + } + current->thread.tm_tfhar = mfspr(SPRN_TFHAR); + current->thread.tm_tfiar = mfspr(SPRN_TFIAR); + current->thread.tm_texasr = mfspr(SPRN_TEXASR); + current->thread.regs->msr &= ~MSR_TM; + } +#endif + kvmppc_core_prepare_to_enter(vcpu); /* No need to go into the guest when all we'll do is come back out */ From a8fad984833832d5ca11a9ed64ddc55646da30e3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Jun 2017 03:57:46 -0400 Subject: [PATCH 259/436] ufs_truncate_blocks(): fix the case when size is in the last direct block The logics when deciding whether we need to do anything with direct blocks is broken when new size is within the last direct block. It's better to find the path to the last byte _not_ to be removed and use that instead of the path to the beginning of the first block to be freed... Signed-off-by: Al Viro --- fs/ufs/inode.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 1dda6c4875f9..9f4590261134 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -886,7 +886,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count) ctx->to = from + count; } -#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) static void ufs_trunc_direct(struct inode *inode) @@ -1124,19 +1123,24 @@ static void ufs_truncate_blocks(struct inode *inode) struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; unsigned offsets[4]; - int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); + int depth; int depth2; unsigned i; struct ufs_buffer_head *ubh[3]; void *p; u64 block; - if (!depth) - return; + if (inode->i_size) { + sector_t last = (inode->i_size - 1) >> uspi->s_bshift; + depth = ufs_block_to_path(inode, last, offsets); + if (!depth) + return; + } else { + depth = 1; + } - /* find the last non-zero in offsets[] */ for (depth2 = depth - 1; depth2; depth2--) - if (offsets[depth2]) + if (offsets[depth2] != uspi->s_apb - 1) break; mutex_lock(&ufsi->truncate_mutex); @@ -1145,9 +1149,8 @@ static void ufs_truncate_blocks(struct inode *inode) offsets[0] = UFS_IND_BLOCK; } else { /* get the blocks that should be partially emptied */ - p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]); + p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++); for (i = 0; i < depth2; i++) { - offsets[i]++; /* next branch is fully freed */ block = ufs_data_ptr_to_cpu(sb, p); if (!block) break; @@ -1158,7 +1161,7 @@ static void ufs_truncate_blocks(struct inode *inode) write_sequnlock(&ufsi->meta_lock); break; } - p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]); + p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++); } while (i--) free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); From 1a73d9310e093fc3adffba4d0a67b9fab2ee3f63 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 2 Jun 2017 11:35:01 -0700 Subject: [PATCH 260/436] MIPS: Fix bnezc/jialc return address calculation The code handling the pop76 opcode (ie. bnezc & jialc instructions) in __compute_return_epc_for_insn() needs to set the value of $31 in the jialc case, which is encoded with rs = 0. 
However its check to differentiate bnezc (rs != 0) from jialc (rs = 0) was unfortunately backwards, meaning that if we emulate a bnezc instruction we clobber $31 & if we emulate a jialc instruction it actually behaves like a jic instruction. Fix this by inverting the check of rs to match the way the instructions are actually encoded. Signed-off-by: Paul Burton Fixes: 28d6f93d201d ("MIPS: Emulate the new MIPS R6 BNEZC and JIALC instructions") Cc: stable # v4.0+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16178/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/branch.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index b11facd11c9d..f702a459a830 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; } /* Compact branch: BNEZC || JIALC */ - if (insn.i_format.rs) + if (!insn.i_format.rs) { + /* JIALC: set $31/ra */ regs->regs[31] = epc + 4; + } regs->cp0_epc += 8; break; #endif From bcd7c45e0d5a82be9a64b90050f0e09d41a50758 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 2 Jun 2017 12:02:08 -0700 Subject: [PATCH 261/436] MIPS: .its targets depend on vmlinux The .its targets require information about the kernel binary, such as its entry point, which is extracted from the vmlinux ELF. We therefore require that the ELF is built before the .its files are generated. Declare this requirement in the Makefile such that make will ensure this is always the case, otherwise in corner cases we can hit issues as the .its is generated with an incorrect (either invalid or stale) entry point. Signed-off-by: Paul Burton Fixes: cf2a5e0bb4c6 ("MIPS: Support generating Flattened Image Trees (.itb)") Cc: linux-mips@linux-mips.org Cc: stable # v4.9+ Patchwork: https://patchwork.linux-mips.org/patch/16179/ Signed-off-by: Ralf Baechle --- arch/mips/boot/Makefile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 2728a9a9c7c5..145b5ce8eb7e 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@ -DADDR_BITS=$(ADDR_BITS) \ -DADDR_CELLS=$(itb_addr_cells) -$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) -$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) -$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) -$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) -$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) quiet_cmd_itb-image = ITB $@ From 81be24d263dbeddaba35827036d6f6787a59c2c3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jun 2017 
07:20:09 +0100 Subject: [PATCH 262/436] Hang/soft lockup in d_invalidate with simultaneous calls It's not hard to trigger a bunch of d_invalidate() on the same dentry in parallel. They end up fighting each other - any dentry picked for removal by one will be skipped by the rest and we'll go for the next iteration through the entire subtree, even if everything is being skipped. Morevoer, we immediately go back to scanning the subtree. The only thing we really need is to dissolve all mounts in the subtree and as soon as we've nothing left to do, we can just unhash the dentry and bugger off. Signed-off-by: Al Viro --- fs/dcache.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index cddf39777835..a9f995f6859e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1494,7 +1494,7 @@ static void check_and_drop(void *_data) { struct detach_data *data = _data; - if (!data->mountpoint && !data->select.found) + if (!data->mountpoint && list_empty(&data->select.dispose)) __d_drop(data->select.start); } @@ -1536,17 +1536,15 @@ void d_invalidate(struct dentry *dentry) d_walk(dentry, &data, detach_and_collect, check_and_drop); - if (data.select.found) + if (!list_empty(&data.select.dispose)) shrink_dentry_list(&data.select.dispose); + else if (!data.mountpoint) + return; if (data.mountpoint) { detach_mounts(data.mountpoint); dput(data.mountpoint); } - - if (!data.mountpoint && !data.select.found) - break; - cond_resched(); } } From 4068367c9ca7b515a209f9c0c8741309a1e90495 Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Thu, 8 Jun 2017 17:32:29 -0700 Subject: [PATCH 263/436] fs: don't forget to put old mntns in mntns_install Fixes: 4f757f3cbf54 ("make sure that mntns_install() doesn't end up with referral for root") Cc: Al Viro Signed-off-by: Andrei Vagin Signed-off-by: Al Viro --- fs/namespace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/namespace.c b/fs/namespace.c index 8bd3e4d448b9..5a4438445bf7 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3488,6 +3488,8 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns) return err; } + put_mnt_ns(old_mnt_ns); + /* Update the pwd and root */ set_fs_pwd(fs, &root); set_fs_root(fs, &root); From c9aba14362a6eec583819ec8f4b872c1816f5cbe Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 15 Jun 2017 13:46:00 +0200 Subject: [PATCH 264/436] firmware: dmi_scan: Look for SMBIOS 3 entry point first Since version 3.0.0 of the SMBIOS specification, there can be multiple entry points in memory, pointing to one or two DMI tables. If both a 32-bit ("_SM_") entry point and a 64-bit ("_SM3_") entry point are present, the specification requires that the latter points to a table which is a super-set of the table pointed to by the former. Therefore we should give preference to the 64-bit ("_SM3_") entry point. However, currently the code is picking the first valid entry point it finds. Per specification, we should look for a 64-bit ("_SM3_") entry point first, and if we can't find any, look for a 32-bit ("_SM_" or "_DMI_") entry point. Modify the code to do that. 
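The preference order is easy to show in reduced form. The sketch below scans a mapped 64 KiB window in 16-byte steps, as the real code does, but skips the length and checksum validation performed by dmi_smbios3_present() and dmi_present(); pick_entry_point() is an illustrative name only.

    #include <string.h>

    /* Prefer the 64-bit "_SM3_" anchor; fall back to the 32-bit "_SM_" one. */
    static const unsigned char *pick_entry_point(const unsigned char *p, size_t len)
    {
            size_t off;

            for (off = 0; off + 16 <= len; off += 16)
                    if (!memcmp(p + off, "_SM3_", 5))   /* SMBIOS 3.x, 64-bit */
                            return p + off;

            for (off = 0; off + 16 <= len; off += 16)
                    if (!memcmp(p + off, "_SM_", 4))    /* SMBIOS 2.x, 32-bit */
                            return p + off;

            return NULL;                                /* no entry point found */
    }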
Signed-off-by: Jean Delvare --- drivers/firmware/dmi_scan.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 93f7acdaac7a..82ee042f075c 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -649,6 +649,21 @@ void __init dmi_scan_machine(void) if (p == NULL) goto error; + /* + * Same logic as above, look for a 64-bit entry point + * first, and if not found, fall back to 32-bit entry point. + */ + memcpy_fromio(buf, p, 16); + for (q = p + 16; q < p + 0x10000; q += 16) { + memcpy_fromio(buf + 16, q, 16); + if (!dmi_smbios3_present(buf)) { + dmi_available = 1; + dmi_early_unmap(p, 0x10000); + goto out; + } + memcpy(buf, buf + 16, 16); + } + /* * Iterate over all possible DMI header addresses q. * Maintain the 32 bytes around q in buf. On the @@ -659,7 +674,7 @@ void __init dmi_scan_machine(void) memset(buf, 0, 16); for (q = p; q < p + 0x10000; q += 16) { memcpy_fromio(buf + 16, q, 16); - if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { + if (!dmi_present(buf)) { dmi_available = 1; dmi_early_unmap(p, 0x10000); goto out; From c926820085437a27b27e78996b2c7a5ad94e8055 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 15 Jun 2017 13:46:00 +0200 Subject: [PATCH 265/436] firmware: dmi_scan: Make dmi_walk and dmi_walk_early return real error codes Currently they return -1 on error, which will confuse callers if they try to interpret it as a normal negative error code. Signed-off-by: Andy Lutomirski Signed-off-by: Darren Hart (VMware) Signed-off-by: Jean Delvare --- drivers/firmware/dmi_scan.c | 9 +++++---- include/linux/dmi.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 82ee042f075c..fc30249132f5 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, buf = dmi_early_remap(dmi_base, orig_dmi_len); if (buf == NULL) - return -1; + return -ENOMEM; dmi_decode_table(buf, decode, NULL); @@ -1008,7 +1008,8 @@ EXPORT_SYMBOL(dmi_get_date); * @decode: Callback function * @private_data: Private data to be passed to the callback function * - * Returns -1 when the DMI table can't be reached, 0 on success. + * Returns 0 on success, -ENXIO if DMI is not selected or not present, + * or a different negative error code if DMI walking fails. 
*/ int dmi_walk(void (*decode)(const struct dmi_header *, void *), void *private_data) @@ -1016,11 +1017,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), u8 *buf; if (!dmi_available) - return -1; + return -ENXIO; buf = dmi_remap(dmi_base, dmi_len); if (buf == NULL) - return -1; + return -ENOMEM; dmi_decode_table(buf, decode, private_data); diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 5e9c74cf8894..9bbf21a516e4 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; } static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), - void *private_data) { return -1; } + void *private_data) { return -ENXIO; } static inline bool dmi_match(enum dmi_field f, const char *str) { return false; } static inline void dmi_memdev_name(u16 handle, const char **bank, From e0733e975385d25d88c1384dafb8ea4dcf7513c0 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 15 Jun 2017 13:46:01 +0200 Subject: [PATCH 266/436] firmware: dmi: Fix permissions of product_family This is not sensitive information like serial numbers, we can allow all users to read it. Fix odd alignment while we're here. Signed-off-by: Jean Delvare Fixes: c61872c9833d ("firmware: dmi: Add DMI_PRODUCT_FAMILY identification string") Reviewed-by: Andy Shevchenko Reviewed-by: Mika Westerberg Cc: Dmitry Torokhov Cc: Linus Walleij --- drivers/firmware/dmi-id.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index dc269cb288c2..951b6c79f166 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME); DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); -DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); +DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY); DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); @@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void) ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); - ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); + ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); From a814c3597a6b6040e2ef9459748081a6d5b7312d Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 15 Jun 2017 13:46:01 +0200 Subject: [PATCH 267/436] firmware: dmi_scan: Check DMI structure length Before accessing DMI data to record it for later, we should ensure that the DMI structures are large enough to contain the data in question. 
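The guard pattern is uniform across the hunks below: never read a field whose offset lies beyond the length the structure itself reports. A hedged sketch of that check; dmi_field_present() and save_type_field() are illustrative names, not existing helpers.

    /* True if the structure's reported length covers [offset, offset + size). */
    static int dmi_field_present(const struct dmi_header *dm, int offset, int size)
    {
            return dm->length >= offset + size;
    }

    static void save_type_field(const struct dmi_header *dm, int offset)
    {
            const u8 *d;

            if (!dmi_field_present(dm, offset, 1))      /* truncated structure */
                    return;

            d = (const u8 *)dm + offset;
            /* ... record *d & 0x7F, as dmi_save_type() does ... */
    }

The actual patch open-codes the comparison at each call site, but the intent is the same.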
Signed-off-by: Jean Delvare Reviewed-by: Mika Westerberg Cc: Dmitry Torokhov Cc: Andy Shevchenko Cc: Linus Walleij --- drivers/firmware/dmi_scan.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index fc30249132f5..783041964439 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, const char *d = (const char *) dm; const char *p; - if (dmi_ident[slot]) + if (dmi_ident[slot] || dm->length <= string) return; p = dmi_string(dm, d[string]); @@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index) { - const u8 *d = (u8 *) dm + index; + const u8 *d; char *s; int is_ff = 1, is_00 = 1, i; - if (dmi_ident[slot]) + if (dmi_ident[slot] || dm->length <= index + 16) return; + d = (u8 *) dm + index; for (i = 0; i < 16 && (is_ff || is_00); i++) { if (d[i] != 0x00) is_00 = 0; @@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index) { - const u8 *d = (u8 *) dm + index; + const u8 *d; char *s; - if (dmi_ident[slot]) + if (dmi_ident[slot] || dm->length <= index) return; s = dmi_alloc(4); if (!s) return; + d = (u8 *) dm + index; sprintf(s, "%u", *d & 0x7F); dmi_ident[slot] = s; } @@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm) static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) { - int i, count = *(u8 *)(dm + 1); + int i, count; struct dmi_device *dev; + if (dm->length < 0x05) + return; + + count = *(u8 *)(dm + 1); for (i = 1; i <= count; i++) { const char *devname = dmi_string(dm, i); @@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm) const char *name; const u8 *d = (u8 *)dm; + if (dm->length < 0x0B) + return; + /* Skip disabled device */ if ((d[0x5] & 0x80) == 0) return; @@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) const char *d = (const char *)dm; static int nr; - if (dm->type != DMI_ENTRY_MEM_DEVICE) + if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12) return; if (nr >= dmi_memdev_nr) { pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); From 59e04bc20de4bd1bd5bfa810bf04a4e50a27b9c1 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 10 Apr 2017 22:29:22 +0200 Subject: [PATCH 268/436] gpu: host1x: Fix error handling If 'devm_reset_control_get' returns an error, then we erroneously return success because error code is taken from 'host->clk' instead of 'host->rst'. 
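This is the familiar copy-and-paste slip in the IS_ERR()/PTR_ERR() idiom: the error code must come from the handle that was just tested. A generic sketch of the corrected shape; example_get_reset() is an illustrative wrapper, not driver code.

    static int example_get_reset(struct device *dev, struct reset_control **out)
    {
            struct reset_control *rst = devm_reset_control_get(dev, "host1x");

            if (IS_ERR(rst))
                    return PTR_ERR(rst);    /* not PTR_ERR(clk): report the handle just checked */

            *out = rst;
            return 0;
    }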
Fixes: b386c6b73ac6 ("gpu: host1x: Support module reset") Signed-off-by: Christophe JAILLET Reviewed-by: Mikko Perttunen Signed-off-by: Thierry Reding Link: http://patchwork.freedesktop.org/patch/msgid/20170410202922.17665-1-christophe.jaillet@wanadoo.fr --- drivers/gpu/host1x/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f05ebb14fa63..ac65f52850a6 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev) host->rst = devm_reset_control_get(&pdev->dev, "host1x"); if (IS_ERR(host->rst)) { - err = PTR_ERR(host->clk); + err = PTR_ERR(host->rst); dev_err(&pdev->dev, "failed to get reset: %d\n", err); return err; } From 1066a8959d04785b9d2e9cbb91aaef922b710c4a Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 15 Jun 2017 02:18:24 +0300 Subject: [PATCH 269/436] drm/tegra: Fix lockup on a use of staging API Commit bdd2f9cd10eb ("Don't leak kernel pointer to userspace") added a mutex around staging IOCTL's, some of those mutexes are taken twice. Fixes: bdd2f9cd10eb ("drm/tegra: Don't leak kernel pointer to userspace") Signed-off-by: Dmitry Osipenko Reviewed-by: Mikko Perttunen Reviewed-by: Erik Faye-Lund Signed-off-by: Thierry Reding Link: http://patchwork.freedesktop.org/patch/msgid/7b70a506a9d2355ea6ff19a8c4f4d726b67719b3.1497480754.git.digetx@gmail.com --- drivers/gpu/drm/tegra/drm.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 9a1e34e48f64..fefa715c4315 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -451,18 +451,6 @@ fail: #ifdef CONFIG_DRM_TEGRA_STAGING -static struct tegra_drm_context * -tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) -{ - struct tegra_drm_context *context; - - mutex_lock(&file->lock); - context = idr_find(&file->contexts, id); - mutex_unlock(&file->lock); - - return context; -} - static int tegra_gem_create(struct drm_device *drm, void *data, struct drm_file *file) { @@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -EINVAL; goto unlock; @@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -ENODEV; goto unlock; @@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -ENODEV; goto unlock; @@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -ENODEV; goto unlock; From d6c153ec85856f5ad29adc20739ec28edeb7c042 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 15 Jun 2017 02:18:25 +0300 Subject: [PATCH 270/436] drm/tegra: Correct idr_alloc() minimum id The client ID 0 is reserved by the host1x/cdma to mark the timeout timer work as already been scheduled and context ID is 
used as the clients one. This fixes spurious CDMA timeouts. Fixes: bdd2f9cd10eb ("drm/tegra: Don't leak kernel pointer to userspace") Signed-off-by: Dmitry Osipenko Reviewed-by: Mikko Perttunen Signed-off-by: Thierry Reding Link: http://patchwork.freedesktop.org/patch/msgid/9c19a44219acd988e678cf9abe21363911184625.1497480754.git.digetx@gmail.com --- drivers/gpu/drm/tegra/drm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index fefa715c4315..81f86a67c10d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -539,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv, if (err < 0) return err; - err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); + err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); if (err < 0) { client->ops->close_channel(context); return err; From cd15fb64ee56192760ad5c1e2ad97a65e735b18b Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 15 Jun 2017 08:39:15 -0400 Subject: [PATCH 271/436] Revert "dm mirror: use all available legs on multiple failures" This reverts commit 12a7cf5ba6c776a2621d8972c7d42e8d3d959d20. This commit apparently attempted to fix an issue that didn't really exist, furthermore: this commit is the source of deadlocks and crashes seen in multiple cases related to failing the primary mirror dev while syncing. Reported-by: Jonathan Brassow Signed-off-by: Mike Snitzer --- drivers/md/dm-raid1.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index e61c45047c25..4da8858856fb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) struct dm_raid1_bio_record { struct mirror *m; + /* if details->bi_bdev == NULL, details were not saved */ struct dm_bio_details details; region_t write_region; }; @@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) struct dm_raid1_bio_record *bio_record = dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); + bio_record->details.bi_bdev = NULL; + if (rw == WRITE) { /* Save region for mirror_end_io() handler */ bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); @@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) } if (error == -EOPNOTSUPP) - return error; + goto out; if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) - return error; + goto out; if (unlikely(error)) { + if (!bio_record->details.bi_bdev) { + /* + * There wasn't enough memory to record necessary + * information for a retry or there was no other + * mirror in-sync. + */ + DMERR_LIMIT("Mirror read failed."); + return -EIO; + } + m = bio_record->m; DMERR("Mirror read failed from %s. 
Trying alternative device.", @@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) bd = &bio_record->details; dm_bio_restore(bd, bio); + bio_record->details.bi_bdev = NULL; bio->bi_error = 0; queue_bio(ms, bio, rw); @@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) DMERR("All replicated volumes dead, failing I/O"); } +out: + bio_record->details.bi_bdev = NULL; + return error; } From 25642705b2359a705784bbbf1655c25a8f8efde2 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 14 Jun 2017 10:19:25 +1000 Subject: [PATCH 272/436] powerpc/xive: Fix offset for store EOI MMIOs Architecturally we should apply a 0x400 offset for these. Not doing it will break future HW implementations. The offset of 0 is supposed to remain for "triggers" though not all sources support both trigger and store EOI, and in P9 specifically, some sources will treat 0 as a store EOI. But future chips will not. So this makes us use the properly architected offset which should work always. Fixes: 243e25112d06 ("powerpc/xive: Native exploitation of the XIVE interrupt controller") Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/xive.h | 12 +++++++----- arch/powerpc/kvm/book3s_xive_template.c | 4 ++-- arch/powerpc/sysdev/xive/common.c | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index c8a822acf962..c23ff4389ca2 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -94,11 +94,13 @@ struct xive_q { * store at 0 and some ESBs support doing a trigger via a * separate trigger page. */ -#define XIVE_ESB_GET 0x800 -#define XIVE_ESB_SET_PQ_00 0xc00 -#define XIVE_ESB_SET_PQ_01 0xd00 -#define XIVE_ESB_SET_PQ_10 0xe00 -#define XIVE_ESB_SET_PQ_11 0xf00 +#define XIVE_ESB_STORE_EOI 0x400 /* Store */ +#define XIVE_ESB_LOAD_EOI 0x000 /* Load */ +#define XIVE_ESB_GET 0x800 /* Load */ +#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ +#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ +#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ +#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 023a31133c37..4636ca6e7d38 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) { /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) - __x_writeq(0, __x_eoi_page(xd)); + __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { opal_int_eoi(hw_irq); } else { @@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) * properly. 
*/ if (xd->flags & XIVE_IRQ_FLAG_LSI) - __x_readq(__x_eoi_page(xd)); + __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); else { eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 913825086b8d..8f5e3035483b 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) { /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) - out_be64(xd->eoi_mmio, 0); + out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0); else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { /* * The FW told us to call it. This happens for some From 916335036d4fe33f9806240cb0d1900f4975b959 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sun, 28 May 2017 09:52:17 +0200 Subject: [PATCH 273/436] i2c: rcar: use correct length when unmapping DMA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because we need to transfer some bytes with PIO, the msg length is not the length of the DMA buffer. Use the correct value which we used when doing the mapping. Fixes: 73e8b0528346e8 ("i2c: rcar: add DMA support") Signed-off-by: Wolfram Sang Reviewed-by: Geert Uytterhoeven Reviewed-by: Niklas Söderlund Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-rcar.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 214bf2835d1f..8be3e6cb8fe6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICFBSCR, TCYC06); dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), - priv->msg->len, priv->dma_direction); + sg_dma_len(&priv->sg), priv->dma_direction); priv->dma_direction = DMA_NONE; } From 17e83549e199d89aace7788a9f11c108671eecf5 Mon Sep 17 00:00:00 2001 From: Liwei Song Date: Tue, 13 Jun 2017 00:59:53 -0400 Subject: [PATCH 274/436] i2c: ismt: fix wrong device address when unmap the data buffer Fix the following kernel bug: kernel BUG at drivers/iommu/intel-iommu.c:3260! invalid opcode: 0000 [#5] PREEMPT SMP Hardware name: Intel Corp. Harcuvar/Server, BIOS HAVLCRB0.X64.0013.D39.1608311820 08/31/2016 task: ffff880175389950 ti: ffff880176bec000 task.ti: ffff880176bec000 RIP: 0010:[] [] intel_unmap+0x25b/0x260 RSP: 0018:ffff880176bef5e8 EFLAGS: 00010296 RAX: 0000000000000024 RBX: ffff8800773c7c88 RCX: 000000000000ce04 RDX: 0000000080000000 RSI: 0000000000000000 RDI: 0000000000000009 RBP: ffff880176bef638 R08: 0000000000000010 R09: 0000000000000004 R10: ffff880175389c78 R11: 0000000000000a4f R12: ffff8800773c7868 R13: 00000000ffffac88 R14: ffff8800773c7818 R15: 0000000000000001 FS: 00007fef21258700(0000) GS:ffff88017b5c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000000066d6d8 CR3: 000000007118c000 CR4: 00000000003406e0 Stack: 00000000ffffac88 ffffffff8199867f ffff880176bef5f8 ffff880100000030 ffff880176bef668 ffff8800773c7c88 ffff880178288098 ffff8800772c0010 ffff8800773c7818 0000000000000001 ffff880176bef648 ffffffff8150a86e Call Trace: [] ? printk+0x46/0x48 [] intel_unmap_page+0xe/0x10 [] ismt_access+0x27b/0x8fa [i2c_ismt] [] ? __pm_runtime_suspend+0xa0/0xa0 [] ? pm_suspend_timer_fn+0x80/0x80 [] ? __pm_runtime_suspend+0xa0/0xa0 [] ? pm_suspend_timer_fn+0x80/0x80 [] ? 
pci_bus_read_dev_vendor_id+0xf0/0xf0 [] i2c_smbus_xfer+0xec/0x4b0 [] ? vprintk_emit+0x345/0x530 [] i2cdev_ioctl_smbus+0x12b/0x240 [i2c_dev] [] ? vprintk_default+0x29/0x40 [] i2cdev_ioctl+0x63/0x1ec [i2c_dev] [] do_vfs_ioctl+0x328/0x5d0 [] ? vfs_write+0x11c/0x190 [] ? rt_up_read+0x19/0x20 [] SyS_ioctl+0x81/0xa0 [] system_call_fastpath+0x16/0x6e This happens when running "i2cdetect -y 0" to detect the SMBus iSMT adapter. After an I2C block read/write finishes, when unmapping the data buffer, a wrong device address was passed to dma_unmap_single(). To fix this, give dma_unmap_single() the "dev" parameter, just like dma_map_single() does, so the unmap can find the right device. Fixes: 13f35ac14cd0 ("i2c: Adding support for Intel iSMT SMBus 2.0 host controller") Signed-off-by: Liwei Song Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-ismt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index f573448d2132..e98e44e584a4 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, /* unmap the data buffer */ if (dma_size != 0) - dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); + dma_unmap_single(dev, dma_addr, dma_size, dma_direction); if (unlikely(!time_left)) { dev_err(dev, "completion wait timed out\n"); From f8a894b218138888542a5058d0e902378fd0d4ec Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 15 Jun 2017 16:33:58 +0800 Subject: [PATCH 275/436] ipv6: fix calling in6_ifa_hold incorrectly for dad work Now when starting the dad work in addrconf_mod_dad_work, if the dad work is idle and is being queued, it needs to hold ifa. The problem is that there is a gap at [1]: if the pending dad work is removed elsewhere during that window, the hold on ifa is missed, yet the dad work is still idle and gets queued. if (!delayed_work_pending(&ifp->dad_work)) in6_ifa_hold(ifp); <--------------[1] mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); A use-after-free issue can be caused by this. Chen Wei found this issue when WARN_ON(!hlist_unhashed(&ifp->addr_lst)) in net6_ifa_finish_destroy was hit because of it. Following Hannes' suggestion, this patch fixes it by holding ifa first in addrconf_mod_dad_work, then calling mod_delayed_work and putting ifa if the dad_work was already queued. Note that this patch did not choose to fix it with: if (!mod_delayed_work(delay)) in6_ifa_hold(ifp); With that approach, when delay == 0, dad_work would be scheduled immediately, so all addrconf_mod_dad_work(0) calls would have had to be moved under ifp->lock. Reported-by: Wei Chen Suggested-by: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: Xin Long Signed-off-by: David S.
Miller --- net/ipv6/addrconf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6a4fb1e629fb..686c92375e81 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, unsigned long delay) { - if (!delayed_work_pending(&ifp->dad_work)) - in6_ifa_hold(ifp); - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); + in6_ifa_hold(ifp); + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) + in6_ifa_put(ifp); } static int snmp6_alloc_dev(struct inet6_dev *idev) From 6d0507a777fbc533f7f1bf5664a81982dd50dece Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:47:32 +0100 Subject: [PATCH 276/436] brcmfmac: add parameter to pass error code in firmware callback Extend the parameters in the firmware callback so it can be called upon success and failure. This allows the caller to properly clear all resources in the failure path. Right now the error code is always zero, ie. success. Cc: stable@vger.kernel.org # 4.9.x- Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/firmware.c | 10 +++++----- .../broadcom/brcm80211/brcmfmac/firmware.h | 4 ++-- .../wireless/broadcom/brcm80211/brcmfmac/pcie.c | 17 ++++++++++++----- .../wireless/broadcom/brcm80211/brcmfmac/sdio.c | 17 +++++++++++------ .../wireless/broadcom/brcm80211/brcmfmac/usb.c | 6 ++++-- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index c7c1e9906500..ae61a24202ac 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -442,7 +442,7 @@ struct brcmf_fw { const char *nvram_name; u16 domain_nr; u16 bus_nr; - void (*done)(struct device *dev, const struct firmware *fw, + void (*done)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len); }; @@ -477,7 +477,7 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) goto fail; - fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); + fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); kfree(fwctx); return; @@ -499,7 +499,7 @@ static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) /* only requested code so done here */ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { - fwctx->done(fwctx->dev, fw, NULL, 0); + fwctx->done(fwctx->dev, 0, fw, NULL, 0); kfree(fwctx); return; } @@ -522,7 +522,7 @@ fail: int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len), u16 domain_nr, u16 bus_nr) @@ -555,7 +555,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, int brcmf_fw_get_firmwares(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index d3c9f0d52ae3..8fa4b7e1ab3d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram); */ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len), u16 domain_nr, u16 bus_nr); int brcmf_fw_get_firmwares(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f36b96dc6acd..f878706613e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { .write32 = brcmf_pcie_buscore_write32, }; -static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, +static void brcmf_pcie_setup(struct device *dev, int ret, + const struct firmware *fw, void *nvram, u32 nvram_len) { - struct brcmf_bus *bus = dev_get_drvdata(dev); - struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; - struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; + struct brcmf_bus *bus; + struct brcmf_pciedev *pcie_bus_dev; + struct brcmf_pciedev_info *devinfo; struct brcmf_commonring **flowrings; - int ret; u32 i; + /* check firmware loading result */ + if (ret) + goto fail; + + bus = dev_get_drvdata(dev); + pcie_bus_dev = bus->bus_priv.pcie; + devinfo = pcie_bus_dev->devinfo; brcmf_pcie_attach(devinfo); /* Some of the firmwares have the size of the memory of the device diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index e03450059b06..6e1fcdcde11c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .get_memdump = brcmf_sdio_bus_get_memdump, }; -static void brcmf_sdio_firmware_callback(struct device *dev, +static void brcmf_sdio_firmware_callback(struct device *dev, int err, const struct firmware *code, void *nvram, u32 nvram_len) { - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - struct brcmf_sdio *bus = sdiodev->bus; - int err = 0; + struct brcmf_bus *bus_if; + struct brcmf_sdio_dev *sdiodev; + struct brcmf_sdio *bus; u8 saveclk; - brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); + brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); + if (err) + goto fail; + bus_if = dev_get_drvdata(dev); if (!bus_if->drvr) return; + sdiodev = bus_if->bus_priv.sdio; + bus = sdiodev->bus; + /* try to download image and nvram to the dongle */ bus->alp_only = true; err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index e4d545f9edee..9ce3b55c3ffe 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1159,13 +1159,15 @@ fail: return ret; } -static void brcmf_usb_probe_phase2(struct device *dev, +static void brcmf_usb_probe_phase2(struct device *dev, int ret, const struct firmware *fw, void *nvram, u32 nvlen) { struct brcmf_bus *bus = dev_get_drvdata(dev); struct brcmf_usbdev_info *devinfo; - int ret; + + if (ret) + goto error; brcmf_dbg(USB, "Start fw downloading\n"); From 03fb0e8393fae8ebb6710a99387853ed0becbc8e Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:47:33 +0100 Subject: [PATCH 277/436] brcmfmac: use firmware callback upon failure to load When firmware loading failed, the code used to unbind the device provided by the calling code. However, for the sdio driver two devices are bound and both need to be released upon failure. The callback has been extended with a parameter to pass the error code, so add that in this commit upon firmware loading failure. Cc: stable@vger.kernel.org # 4.9.x- Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/firmware.c | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index ae61a24202ac..d231042f19d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -484,39 +484,38 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); release_firmware(fwctx->code); - device_release_driver(fwctx->dev); + fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); kfree(fwctx); } static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; - int ret; + int ret = 0; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); - if (!fw) + if (!fw) { + ret = -ENOENT; goto fail; - - /* only requested code so done here */ - if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { - fwctx->done(fwctx->dev, 0, fw, NULL, 0); - kfree(fwctx); - return; } + /* only requested code so done here */ + if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) + goto done; + fwctx->code = fw; ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, fwctx->dev, GFP_KERNEL, fwctx, brcmf_fw_request_nvram_done); - if (!ret) - return; - - brcmf_fw_request_nvram_done(NULL, fwctx); + /* pass NULL to nvram callback for bcm47xx fallback */ + if (ret) + brcmf_fw_request_nvram_done(NULL, fwctx); return; fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); - device_release_driver(fwctx->dev); +done: + fwctx->done(fwctx->dev, ret, fw, NULL, 0); kfree(fwctx); } From 7a51461fc2da82a6c565a3ee65c41c197f28225d Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:47:34 +0100 Subject: [PATCH 278/436] brcmfmac: unbind all devices upon failure in firmware callback When the firmware request fails, brcmf_ops_sdio_remove is called and the brcmf_bus is freed. In such circumstances, if you do a suspend/resume cycle the kernel hangs on resume due to a NULL pointer dereference in the resume function. So in brcmf_sdio_firmware_callback() we need to unbind the driver from both sdio_func devices when firmware load failure is indicated.
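In sketch form, the SDIO failure path has to drop the driver from both claimed functions, not only the device handed to the callback (hedged sketch following the existing brcmfmac SDIO layout with sdiodev->func[2] as the second function; the real hunk appears further below):

fail:
	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
	device_release_driver(dev);			/* SDIO function #1 */
	device_release_driver(&sdiodev->func[2]->dev);	/* SDIO function #2 */

Releasing only one of the two leaves a half-torn-down SDIO device behind, which is what the suspend/resume NULL pointer dereference described above trips over.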
Cc: stable@vger.kernel.org # 4.9.x- Tested-by: Enric Balletbo i Serra Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 6e1fcdcde11c..5653d6dd38f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3992,14 +3992,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, u8 saveclk; brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); + bus_if = dev_get_drvdata(dev); + sdiodev = bus_if->bus_priv.sdio; if (err) goto fail; - bus_if = dev_get_drvdata(dev); if (!bus_if->drvr) return; - sdiodev = bus_if->bus_priv.sdio; bus = sdiodev->bus; /* try to download image and nvram to the dongle */ @@ -4088,6 +4088,7 @@ release: fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); device_release_driver(dev); + device_release_driver(&sdiodev->func[2]->dev); } struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) From a2b7a622d6292b693544d7c94bb5d11c3f4676f8 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:56:35 +0100 Subject: [PATCH 279/436] brcmfmac: fix brcmf_fws_add_interface() for USB devices USB devices rely on queuing functionality provided by the fwsignal module regardless the mode fwsignal is operating in. For this some data structure needs to be reserved which is tied to the interface, which is done by brcmf_fws_add_interface(). However, it checks the mode. Replace that by checking result from brcmf_fws_queue_skbs(). Otherwise the driver will crash in a null pointer dereference when data is transmitted on the interface. Fixes: fc0471e3e884 ("brcmfmac: ignore interfaces when fwsignal is disabled") Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 72373e59308e..f59642b2c935 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); struct brcmf_fws_mac_descriptor *entry; - if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) + if (!ifp->ndev || !brcmf_fws_queue_skbs(fws)) return; entry = &fws->desc.iface[ifp->ifidx]; From 0cbb738108927916a659b5b0b96e386fcd7cc6e1 Mon Sep 17 00:00:00 2001 From: Mathieu Larouche Date: Wed, 14 Jun 2017 10:39:42 -0400 Subject: [PATCH 280/436] drm/mgag200: Fix to always set HiPri for G200e4 V2 - Changed the HiPri value for G200e4 to always be 0. 
- Added Bandwith limitation to block resolution above 1920x1200x60Hz Signed-off-by: Mathieu Larouche Acked-by: Dave Airlie [seanpaul removed some trailing whitespace from the patch] Signed-off-by: Sean Paul Link: http://patchwork.freedesktop.org/patch/msgid/ec0f8568d7ec41904dfe593c5deccf3f062d7bd8.1497450944.git.mathieu.larouche@matrox.com --- drivers/gpu/drm/mgag200/mgag200_mode.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index adb411a078e8..f4b53588e071 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (IS_G200_SE(mdev)) { - if (mdev->unique_rev_id >= 0x02) { + if (mdev->unique_rev_id >= 0x04) { + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); + WREG8(MGAREG_CRTCEXT_DATA, 0); + } else if (mdev->unique_rev_id >= 0x02) { u8 hi_pri_lvl; u32 bpp; u32 mb; @@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector, if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (30100 * 1024)) return MODE_BANDWIDTH; + } else { + if (mga_vga_calculate_mode_bandwidth(mode, bpp) + > (55000 * 1024)) + return MODE_BANDWIDTH; } } else if (mdev->type == G200_WB) { if (mode->hdisplay > 1280) From 5f2f97656ada8d811d3c1bef503ced266fcd53a0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 15 Jun 2017 00:12:24 +0100 Subject: [PATCH 281/436] rxrpc: Fix several cases where a padded len isn't checked in ticket decode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes CVE-2017-7482. When a kerberos 5 ticket is being decoded so that it can be loaded into an rxrpc-type key, there are several places in which the length of a variable-length field is checked to make sure that it's not going to overrun the available data - but the data is padded to the nearest four-byte boundary and the code doesn't check for this extra. This could lead to the size-remaining variable wrapping and the data pointer going over the end of the buffer. Fix this by making the various variable-length data checks use the padded length. Reported-by: 石磊 Signed-off-by: David Howells Reviewed-by: Marc Dionne Reviewed-by: Dan Carpenter Signed-off-by: David S. 
Miller --- net/rxrpc/key.c | 64 ++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 0a4e28477ad9..54369225766e 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, n_parts, loop, tmp; + unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; /* there must be at least one name, and at least #names+1 length * words */ @@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) return -EINVAL; - if (tmp > toklen) + paddedlen = (tmp + 3) & ~3; + if (paddedlen > toklen) return -EINVAL; princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->name_parts[loop]) return -ENOMEM; memcpy(princ->name_parts[loop], xdr, tmp); princ->name_parts[loop][tmp] = 0; - tmp = (tmp + 3) & ~3; - toklen -= tmp; - xdr += tmp >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } if (toklen < 4) @@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) return -EINVAL; - if (tmp > toklen) + paddedlen = (tmp + 3) & ~3; + if (paddedlen > toklen) return -EINVAL; princ->realm = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->realm) return -ENOMEM; memcpy(princ->realm, xdr, tmp); princ->realm[tmp] = 0; - tmp = (tmp + 3) & ~3; - toklen -= tmp; - xdr += tmp >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; _debug("%s/...@%s", princ->name_parts[0], princ->realm); @@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, len; + unsigned int toklen = *_toklen, len, paddedlen; /* there must be at least one tag and one length word */ if (toklen <= 8) @@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, toklen -= 8; if (len > max_data_size) return -EINVAL; + paddedlen = (len + 3) & ~3; + if (paddedlen > toklen) + return -EINVAL; td->data_len = len; if (len > 0) { td->data = kmemdup(xdr, len, GFP_KERNEL); if (!td->data) return -ENOMEM; - len = (len + 3) & ~3; - toklen -= len; - xdr += len >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } _debug("tag %x len %x", td->tag, td->data_len); @@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, const __be32 **_xdr, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, len; + unsigned int toklen = *_toklen, len, paddedlen; /* there must be at least one length word */ if (toklen <= 4) @@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, toklen -= 4; if (len > AFSTOKEN_K5_TIX_MAX) return -EINVAL; + paddedlen = (len + 3) & ~3; + if (paddedlen > toklen) + return -EINVAL; *_tktlen = len; _debug("ticket len %u", len); @@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, *_ticket = kmemdup(xdr, len, GFP_KERNEL); if (!*_ticket) return -ENOMEM; - len = (len + 3) & ~3; - toklen -= len; - xdr += len >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } *_xdr = xdr; @@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) { const __be32 *xdr = prep->data, *token; const char *cp; - unsigned int len, tmp, loop, ntoken, toklen, 
sec_ix; + unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; size_t datalen = prep->datalen; int ret; @@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) if (len < 1 || len > AFSTOKEN_CELL_MAX) goto not_xdr; datalen -= 4; - tmp = (len + 3) & ~3; - if (tmp > datalen) + paddedlen = (len + 3) & ~3; + if (paddedlen > datalen) goto not_xdr; cp = (const char *) xdr; for (loop = 0; loop < len; loop++) if (!isprint(cp[loop])) goto not_xdr; - if (len < tmp) - for (; loop < tmp; loop++) - if (cp[loop]) - goto not_xdr; + for (; loop < paddedlen; loop++) + if (cp[loop]) + goto not_xdr; _debug("cellname: [%u/%u] '%*.*s'", - len, tmp, len, len, (const char *) xdr); - datalen -= tmp; - xdr += tmp >> 2; + len, paddedlen, len, len, (const char *) xdr); + datalen -= paddedlen; + xdr += paddedlen >> 2; /* get the token count */ if (datalen < 12) @@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) sec_ix = ntohl(*xdr); datalen -= 4; _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); - if (toklen < 20 || toklen > datalen) + paddedlen = (toklen + 3) & ~3; + if (toklen < 20 || toklen > datalen || paddedlen > datalen) goto not_xdr; - datalen -= (toklen + 3) & ~3; - xdr += (toklen + 3) >> 2; + datalen -= paddedlen; + xdr += paddedlen >> 2; } while (--loop > 0); From 0f933328f018c118ded2035f12068bbdfdef399d Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 5 May 2017 15:01:41 -0300 Subject: [PATCH 282/436] drm: mxsfb_crtc: Reset the eLCDIF controller According to the eLCDIF initialization steps listed in the MX6SX Reference Manual the eLCDIF block reset is mandatory. Without performing the eLCDIF reset the display shows garbage content when the kernel boots. In earlier tests this issue has not been observed because the bootloader was previously showing a splash screen and the bootloader display driver does properly implement the eLCDIF reset. Add the eLCDIF reset to the driver, so that it can operate correctly independently of the bootloader. Tested on a imx6sx-sdb board. Cc: Signed-off-by: Fabio Estevam Signed-off-by: Sean Paul Link: http://patchwork.freedesktop.org/patch/msgid/1494007301-14535-1-git-send-email-fabio.estevam@nxp.com --- drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 42 ++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 1144e0c9e894..0abe77675b76 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -35,6 +35,13 @@ #include "mxsfb_drv.h" #include "mxsfb_regs.h" +#define MXS_SET_ADDR 0x4 +#define MXS_CLR_ADDR 0x8 +#define MODULE_CLKGATE BIT(30) +#define MODULE_SFTRST BIT(31) +/* 1 second delay should be plenty of time for block reset */ +#define RESET_TIMEOUT 1000000 + static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) { return (val & mxsfb->devdata->hs_wdth_mask) << @@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb) clk_disable_unprepare(mxsfb->clk_disp_axi); } +/* + * Clear the bit and poll it cleared. This is usually called with + * a reset address and mask being either SFTRST(bit 31) or CLKGATE + * (bit 30). 
+ */ +static int clear_poll_bit(void __iomem *addr, u32 mask) +{ + u32 reg; + + writel(mask, addr + MXS_CLR_ADDR); + return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT); +} + +static int mxsfb_reset_block(void __iomem *reset_addr) +{ + int ret; + + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); + if (ret) + return ret; + + writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); + + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); + if (ret) + return ret; + + return clear_poll_bit(reset_addr, MODULE_CLKGATE); +} + static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) { struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; @@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) */ mxsfb_enable_axi_clk(mxsfb); + /* Mandatory eLCDIF reset as per the Reference Manual */ + err = mxsfb_reset_block(mxsfb->base); + if (err) + return; + /* Clear the FIFOs */ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); From 988c7322116970696211e902b468aefec95b6ec4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 15 Jun 2017 17:49:08 +0800 Subject: [PATCH 283/436] sctp: return next obj by passing pos + 1 into sctp_transport_get_idx In sctp_for_each_transport, pos is used to save how many objs it has dumped. Now it gets the last obj by sctp_transport_get_idx, then gets the next obj by sctp_transport_get_next. The issue is that in the meanwhile if some objs in transport hashtable are removed and the objs nums are less than pos, sctp_transport_get_idx would return NULL and hti.walker.tbl is NULL as well. At this moment it should stop hti, instead of continue getting the next obj. Or it would cause a NULL pointer dereference in sctp_transport_get_next. This patch is to pass pos + 1 into sctp_transport_get_idx to get the next obj directly, even if pos > objs nums, it would return NULL and stop hti. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/socket.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 30aa0a529215..3a8318e518f1 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), if (err) return err; - sctp_transport_get_idx(net, &hti, pos); - obj = sctp_transport_get_next(net, &hti); - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { + obj = sctp_transport_get_idx(net, &hti, pos + 1); + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) { struct sctp_transport *transport = obj; if (!sctp_transport_hold(transport)) From 3b1bbafbfd14474fee61487552c9916ec1b25c58 Mon Sep 17 00:00:00 2001 From: "xypron.glpk@gmx.de" Date: Thu, 15 Jun 2017 20:59:57 +0200 Subject: [PATCH 284/436] Doc: net: dsa: b53: update location of referenced dsa.txt The referenced file dsa.txt is located at Documentation/devicetree/bindings/net/dsa/dsa.txt Reviewed-by: Florian Fainelli Signed-off-by: Heinrich Schuchardt Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/dsa/b53.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt index d6c6e41648d4..8ec2ca21adeb 100644 --- a/Documentation/devicetree/bindings/net/dsa/b53.txt +++ b/Documentation/devicetree/bindings/net/dsa/b53.txt @@ -34,7 +34,7 @@ Required properties: "brcm,bcm6328-switch" "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" -See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional +See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional required and optional properties. Examples: From f16443a034c7aa359ddf6f0f9bc40d01ca31faea Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 13 Jun 2017 15:23:42 -0400 Subject: [PATCH 285/436] USB: gadgetfs, dummy-hcd, net2280: fix locking for callbacks Using the syzkaller kernel fuzzer, Andrey Konovalov generated the following error in gadgetfs: > BUG: KASAN: use-after-free in __lock_acquire+0x3069/0x3690 > kernel/locking/lockdep.c:3246 > Read of size 8 at addr ffff88003a2bdaf8 by task kworker/3:1/903 > > CPU: 3 PID: 903 Comm: kworker/3:1 Not tainted 4.12.0-rc4+ #35 > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 > Workqueue: usb_hub_wq hub_event > Call Trace: > __dump_stack lib/dump_stack.c:16 [inline] > dump_stack+0x292/0x395 lib/dump_stack.c:52 > print_address_description+0x78/0x280 mm/kasan/report.c:252 > kasan_report_error mm/kasan/report.c:351 [inline] > kasan_report+0x230/0x340 mm/kasan/report.c:408 > __asan_report_load8_noabort+0x19/0x20 mm/kasan/report.c:429 > __lock_acquire+0x3069/0x3690 kernel/locking/lockdep.c:3246 > lock_acquire+0x22d/0x560 kernel/locking/lockdep.c:3855 > __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline] > _raw_spin_lock+0x2f/0x40 kernel/locking/spinlock.c:151 > spin_lock include/linux/spinlock.h:299 [inline] > gadgetfs_suspend+0x89/0x130 drivers/usb/gadget/legacy/inode.c:1682 > set_link_state+0x88e/0xae0 drivers/usb/gadget/udc/dummy_hcd.c:455 > dummy_hub_control+0xd7e/0x1fb0 drivers/usb/gadget/udc/dummy_hcd.c:2074 > rh_call_control drivers/usb/core/hcd.c:689 [inline] > rh_urb_enqueue drivers/usb/core/hcd.c:846 [inline] > usb_hcd_submit_urb+0x92f/0x20b0 drivers/usb/core/hcd.c:1650 > usb_submit_urb+0x8b2/0x12c0 drivers/usb/core/urb.c:542 > usb_start_wait_urb+0x148/0x5b0 drivers/usb/core/message.c:56 > usb_internal_control_msg drivers/usb/core/message.c:100 [inline] > usb_control_msg+0x341/0x4d0 drivers/usb/core/message.c:151 > usb_clear_port_feature+0x74/0xa0 drivers/usb/core/hub.c:412 > hub_port_disable+0x123/0x510 drivers/usb/core/hub.c:4177 > hub_port_init+0x1ed/0x2940 drivers/usb/core/hub.c:4648 > hub_port_connect drivers/usb/core/hub.c:4826 [inline] > hub_port_connect_change drivers/usb/core/hub.c:4999 [inline] > port_event drivers/usb/core/hub.c:5105 [inline] > hub_event+0x1ae1/0x3d40 drivers/usb/core/hub.c:5185 > process_one_work+0xc08/0x1bd0 kernel/workqueue.c:2097 > process_scheduled_works kernel/workqueue.c:2157 [inline] > worker_thread+0xb2b/0x1860 kernel/workqueue.c:2233 > kthread+0x363/0x440 kernel/kthread.c:231 > ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:424 > > Allocated by task 9958: > save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 > save_stack+0x43/0xd0 mm/kasan/kasan.c:513 > set_track mm/kasan/kasan.c:525 [inline] > kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:617 > kmem_cache_alloc_trace+0x87/0x280 mm/slub.c:2745 > kmalloc 
include/linux/slab.h:492 [inline] > kzalloc include/linux/slab.h:665 [inline] > dev_new drivers/usb/gadget/legacy/inode.c:170 [inline] > gadgetfs_fill_super+0x24f/0x540 drivers/usb/gadget/legacy/inode.c:1993 > mount_single+0xf6/0x160 fs/super.c:1192 > gadgetfs_mount+0x31/0x40 drivers/usb/gadget/legacy/inode.c:2019 > mount_fs+0x9c/0x2d0 fs/super.c:1223 > vfs_kern_mount.part.25+0xcb/0x490 fs/namespace.c:976 > vfs_kern_mount fs/namespace.c:2509 [inline] > do_new_mount fs/namespace.c:2512 [inline] > do_mount+0x41b/0x2d90 fs/namespace.c:2834 > SYSC_mount fs/namespace.c:3050 [inline] > SyS_mount+0xb0/0x120 fs/namespace.c:3027 > entry_SYSCALL_64_fastpath+0x1f/0xbe > > Freed by task 9960: > save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 > save_stack+0x43/0xd0 mm/kasan/kasan.c:513 > set_track mm/kasan/kasan.c:525 [inline] > kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:590 > slab_free_hook mm/slub.c:1357 [inline] > slab_free_freelist_hook mm/slub.c:1379 [inline] > slab_free mm/slub.c:2961 [inline] > kfree+0xed/0x2b0 mm/slub.c:3882 > put_dev+0x124/0x160 drivers/usb/gadget/legacy/inode.c:163 > gadgetfs_kill_sb+0x33/0x60 drivers/usb/gadget/legacy/inode.c:2027 > deactivate_locked_super+0x8d/0xd0 fs/super.c:309 > deactivate_super+0x21e/0x310 fs/super.c:340 > cleanup_mnt+0xb7/0x150 fs/namespace.c:1112 > __cleanup_mnt+0x1b/0x20 fs/namespace.c:1119 > task_work_run+0x1a0/0x280 kernel/task_work.c:116 > exit_task_work include/linux/task_work.h:21 [inline] > do_exit+0x18a8/0x2820 kernel/exit.c:878 > do_group_exit+0x14e/0x420 kernel/exit.c:982 > get_signal+0x784/0x1780 kernel/signal.c:2318 > do_signal+0xd7/0x2130 arch/x86/kernel/signal.c:808 > exit_to_usermode_loop+0x1ac/0x240 arch/x86/entry/common.c:157 > prepare_exit_to_usermode arch/x86/entry/common.c:194 [inline] > syscall_return_slowpath+0x3ba/0x410 arch/x86/entry/common.c:263 > entry_SYSCALL_64_fastpath+0xbc/0xbe > > The buggy address belongs to the object at ffff88003a2bdae0 > which belongs to the cache kmalloc-1024 of size 1024 > The buggy address is located 24 bytes inside of > 1024-byte region [ffff88003a2bdae0, ffff88003a2bdee0) > The buggy address belongs to the page: > page:ffffea0000e8ae00 count:1 mapcount:0 mapping: (null) > index:0x0 compound_mapcount: 0 > flags: 0x100000000008100(slab|head) > raw: 0100000000008100 0000000000000000 0000000000000000 0000000100170017 > raw: ffffea0000ed3020 ffffea0000f5f820 ffff88003e80efc0 0000000000000000 > page dumped because: kasan: bad access detected > > Memory state around the buggy address: > ffff88003a2bd980: fb fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc > ffff88003a2bda00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc > >ffff88003a2bda80: fc fc fc fc fc fc fc fc fc fc fc fc fb fb fb fb > ^ > ffff88003a2bdb00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb > ffff88003a2bdb80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb > ================================================================== What this means is that the gadgetfs_suspend() routine was trying to access dev->lock after it had been deallocated. The root cause is a race in the dummy_hcd driver; the dummy_udc_stop() routine can race with the rest of the driver because it contains no locking. And even when proper locking is added, it can still race with the set_link_state() function because that function incorrectly drops the private spinlock before invoking any gadget driver callbacks. 
The result of this race, as seen above, is that set_link_state() can invoke a callback in gadgetfs even after gadgetfs has been unbound from dummy_hcd's UDC and its private data structures have been deallocated. include/linux/usb/gadget.h documents that the ->reset, ->disconnect, ->suspend, and ->resume callbacks may be invoked in interrupt context. In general this is necessary, to prevent races with gadget driver removal. This patch fixes dummy_hcd to retain the spinlock across these calls, and it adds a spinlock acquisition to dummy_udc_stop() to prevent the race. The net2280 driver makes the same mistake of dropping the private spinlock for its ->disconnect and ->reset callback invocations. The patch fixes it too. Lastly, since gadgetfs_suspend() may be invoked in interrupt context, it cannot assume that interrupts are enabled when it runs. It must use spin_lock_irqsave() instead of spin_lock_irq(). The patch fixes that bug as well. Signed-off-by: Alan Stern Reported-and-tested-by: Andrey Konovalov CC: Acked-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/legacy/inode.c | 5 +++-- drivers/usb/gadget/udc/dummy_hcd.c | 13 ++++--------- drivers/usb/gadget/udc/net2280.c | 9 +-------- 3 files changed, 8 insertions(+), 19 deletions(-) diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 5ffd879f7886..684900fcfe24 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1679,9 +1679,10 @@ static void gadgetfs_suspend (struct usb_gadget *gadget) { struct dev_data *dev = get_gadget_data (gadget); + unsigned long flags; INFO (dev, "suspended from state %d\n", dev->state); - spin_lock (&dev->lock); + spin_lock_irqsave(&dev->lock, flags); switch (dev->state) { case STATE_DEV_SETUP: // VERY odd... host died?? 
case STATE_DEV_CONNECTED: @@ -1692,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget) default: break; } - spin_unlock (&dev->lock); + spin_unlock_irqrestore(&dev->lock, flags); } static struct usb_gadget_driver gadgetfs_driver = { diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index ccabb51cb98d..7635fd7cc328 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd) /* Report reset and disconnect events to the driver */ if (dum->driver && (disconnect || reset)) { stop_activity(dum); - spin_unlock(&dum->lock); if (reset) usb_gadget_udc_reset(&dum->gadget, dum->driver); else dum->driver->disconnect(&dum->gadget); - spin_lock(&dum->lock); } } else if (dum_hcd->active != dum_hcd->old_active) { - if (dum_hcd->old_active && dum->driver->suspend) { - spin_unlock(&dum->lock); + if (dum_hcd->old_active && dum->driver->suspend) dum->driver->suspend(&dum->gadget); - spin_lock(&dum->lock); - } else if (!dum_hcd->old_active && dum->driver->resume) { - spin_unlock(&dum->lock); + else if (!dum_hcd->old_active && dum->driver->resume) dum->driver->resume(&dum->gadget); - spin_lock(&dum->lock); - } } dum_hcd->old_status = dum_hcd->port_status; @@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g) struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); struct dummy *dum = dum_hcd->dum; + spin_lock_irq(&dum->lock); dum->driver = NULL; + spin_unlock_irq(&dum->lock); return 0; } diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 6cf07857eaca..f2cbd7f8005e 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) nuke(&dev->ep[i]); /* report disconnect; the driver is already quiesced */ - if (driver) { - spin_unlock(&dev->lock); + if (driver) driver->disconnect(&dev->gadget); - spin_lock(&dev->lock); - } usb_reinit(dev); } @@ -3348,8 +3345,6 @@ next_endpoints: BIT(PCI_RETRY_ABORT_INTERRUPT)) static void handle_stat1_irqs(struct net2280 *dev, u32 stat) -__releases(dev->lock) -__acquires(dev->lock) { struct net2280_ep *ep; u32 tmp, num, mask, scratch; @@ -3390,14 +3385,12 @@ __acquires(dev->lock) if (disconnect || reset) { stop_activity(dev, dev->driver); ep0_start(dev); - spin_unlock(&dev->lock); if (reset) usb_gadget_udc_reset (&dev->gadget, dev->driver); else (dev->driver->disconnect) (&dev->gadget); - spin_lock(&dev->lock); return; } } From 6c780a0267b8a1075f40b39851132eeaefefcff5 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 8 Jun 2017 11:33:16 -0500 Subject: [PATCH 286/436] net/mlx5: Wait for FW readiness before initializing command interface Before attempting to initialize the command interface we must wait till the fw_initializing bit is clear. If we fail to meet this condition the hardware will drop our configuration, specifically the descriptors page address. This scenario can happen when the firmware is still executing an FLR flow and did not finish yet so the driver needs to wait for that to finish. 
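For context, the pre-existing wait_fw_init() helper that this fix reuses polls the initialization segment roughly as sketched below (not the verbatim in-tree code; fw_initializing() is assumed to be the existing mlx5 accessor for the "initializing" bit):

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);

	/* poll until firmware clears the initializing bit or we give up */
	while (fw_initializing(dev)) {
		if (time_after(jiffies, end))
			return -EBUSY;
		msleep(FW_INIT_WAIT_MS);
	}
	return 0;
}

The patch below calls this with a new, much larger FW_PRE_INIT_TIMEOUT_MILI before mlx5_cmd_init(), so the descriptor page address programmed there is not dropped while an FLR is still in flight.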
Fixes: e3297246c2c8 ('net/mlx5_core: Wait for FW readiness on startup') Signed-off-by: Eli Cohen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4f577a5abf88..13be264587f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = { }, }; -#define FW_INIT_TIMEOUT_MILI 2000 -#define FW_INIT_WAIT_MS 2 +#define FW_INIT_TIMEOUT_MILI 2000 +#define FW_INIT_WAIT_MS 2 +#define FW_PRE_INIT_TIMEOUT_MILI 10000 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) { @@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, */ dev->state = MLX5_DEVICE_STATE_UP; + /* wait for firmware to accept initialization segments configurations + */ + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); + if (err) { + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", + FW_PRE_INIT_TIMEOUT_MILI); + goto out; + } + err = mlx5_cmd_init(dev); if (err) { dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); From f0b381178b01b831f9907d72f467d6443afdea67 Mon Sep 17 00:00:00 2001 From: Maor Dickman Date: Thu, 18 May 2017 15:15:08 +0300 Subject: [PATCH 287/436] net/mlx5e: Fix timestamping capabilities reporting Misuse of (BIT) macro caused to report wrong flags for "Hardware Transmit Timestamp Modes" and "Hardware Receive Filter Modes" Fixes: ef9814deafd0 ('net/mlx5e: Add HW timestamping (TS) support') Signed-off-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8209affa75c3..16486dff1493 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | - (BIT(1) << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | - (BIT(1) << HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); return 0; } From 5f195c2c5cba60241004146cd12d71451d6b0fc4 Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Tue, 16 May 2017 07:07:11 -0400 Subject: [PATCH 288/436] net/mlx5e: Fix min inline value for VF rep SQs The offending commit only changed the code path for PF/VF, but it didn't take care of VF representors. As a result, since params->tx_min_inline_mode for VF representors is kzalloced to 0 (MLX5_INLINE_MODE_NONE), all VF reps SQs were set to that mode. This actually works on CX5 by default but broke CX4. Fix that by adding a call to query the min inline mode from the VF rep build up code. 
Fixes: a6f402e49901 ("net/mlx5e: Tx, no inline copy on ConnectX-5") Signed-off-by: Chris Mi Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 79462c0368a0..46984a52a94b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->num_tc = 1; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); } static void mlx5e_build_rep_netdev(struct net_device *netdev) From 9d1cef196b2687e9338c4268a3aa0ca521686bc9 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 4 Jun 2017 19:36:17 +0300 Subject: [PATCH 289/436] net/mlx5: Properly check applicability of devlink eswitch commands Currently we don't check that the link type is Eth and hence crash on IB ports when attempting to deref esw->xxx, fix that. To avoid repeating this check over and over, put the existing checks and the one on link type in a single helper. Fixes: 7768d1971de6 ('net/mlx5: E-Switch, Add control for encapsulation') Signed-off-by: Or Gerlitz Reported-by: Mohamad Badarnah Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/eswitch_offloads.c | 77 ++++++++++--------- 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f991f669047e..a53e982a6863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +static int mlx5_devlink_eswitch_check(struct devlink *devlink) { - struct mlx5_core_dev *dev; - u16 cur_mlx5_mode, mlx5_mode = 0; + struct mlx5_core_dev *dev = devlink_priv(devlink); - dev = devlink_priv(devlink); + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -EOPNOTSUPP; if (!MLX5_CAP_GEN(dev, vport_group_manager)) return -EOPNOTSUPP; - cur_mlx5_mode = dev->priv.eswitch->mode; - - if (cur_mlx5_mode == SRIOV_NONE) + if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; + return 0; +} + +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u16 cur_mlx5_mode, mlx5_mode = 0; + int err; + + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; + + cur_mlx5_mode = dev->priv.eswitch->mode; + if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; @@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { - struct mlx5_core_dev *dev; + struct mlx5_core_dev *dev = devlink_priv(devlink); + int err; - dev = devlink_priv(devlink); - - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (dev->priv.eswitch->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); } @@ -954,15 +964,12 @@ int 
mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; - int num_vports = esw->enabled_vports; int err, vport; u8 mlx5_mode; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: @@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) if (err) goto out; - for (vport = 1; vport < num_vports; vport++) { + for (vport = 1; vport < esw->enabled_vports; vport++) { err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); if (err) { esw_warn(dev, "Failed to set min inline on vport %d\n", @@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); } @@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) struct mlx5_eswitch *esw = dev->priv.eswitch; int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || @@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; *encap = esw->offloads.encap; return 0; From 9cfb4f719264f3eeb68122371ad70fd5bf2e10bb Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 11 Jun 2017 19:32:12 +0300 Subject: [PATCH 290/436] net/mlx5e: Remove TC header re-write offloading of ip tos Currently the firmware API is partial and allows to offload only the dscp part of the tos, also, ipv6 support isn't there yet. As such, remove the offloading option of ipv4 dscp till the FW APIs are more comprehensive. 
Fixes: d79b6df6b10a ('net/mlx5e: Add parsing of TC pedit actions to HW format') Signed-off-by: Or Gerlitz Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ec63158ab643..9df9fc0d26f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = { {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, - {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, From 31ac93386d135a6c96de9c8bab406f5ccabf5a4d Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 15 Jun 2017 20:08:32 +0300 Subject: [PATCH 291/436] net/mlx5e: Avoid doing a cleanup call if the profile doesn't have it The error flow of mlx5e_create_netdev calls the cleanup call of the given profile without checking if it exists, fix that. Currently the VF reps don't register that callback and we crash if getting into error -- can be reproduced by the user doing ctrl^C while attempting to change the sriov mode from legacy to switchdev. Fixes: 26e59d8077a3 '(net/mlx5e: Implement mlx5e interface attach/detach callbacks') Signed-off-by: Or Gerlitz Reported-by: Sabrina Dubroca Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 41cd22a223dc..277f4de30375 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return netdev; err_cleanup_nic: - profile->cleanup(priv); + if (profile->cleanup) + profile->cleanup(priv); free_netdev(netdev); return NULL; From 7ceaa6dcd8c6f59588428cec37f3c8093dd1011f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 16 Jun 2017 11:53:19 +1000 Subject: [PATCH 292/436] KVM: PPC: Book3S HV: Save/restore host values of debug registers At present, HV KVM on POWER8 and POWER9 machines loses any instruction or data breakpoint set in the host whenever a guest is run. Instruction breakpoints are currently only used by xmon, but ptrace and the perf_event subsystem can set data breakpoints as well as xmon. To fix this, we save the host values of the debug registers (CIABR, DAWR and DAWRX) before entering the guest and restore them on exit. To provide space to save them in the stack frame, we expand the stack frame allocated by kvmppc_hv_entry() from 112 to 144 bytes. 
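The change itself is PowerPC assembly inside kvmppc_hv_entry(); as a C-level sketch of the idea only (mfspr()/mtspr() and the SPRN_* names are the real kernel macros, the struct and helpers here are illustrative):

struct host_dbg_state { unsigned long ciabr, dawr, dawrx; };

/* before entering the guest: capture the host debug registers */
static void save_host_dbg(struct host_dbg_state *s)
{
	s->ciabr = mfspr(SPRN_CIABR);
	s->dawr  = mfspr(SPRN_DAWR);
	s->dawrx = mfspr(SPRN_DAWRX);
}

/* after the guest exits: restore them instead of clearing them */
static void restore_host_dbg(const struct host_dbg_state *s)
{
	mtspr(SPRN_CIABR, s->ciabr);
	mtspr(SPRN_DAWR, s->dawr);
	mtspr(SPRN_DAWRX, s->dawrx);
}

In the assembly below the three saved values live in the enlarged stack frame (STACK_SLOT_CIABR/DAWR/DAWRX), which is why SFS grows from 112 to 144 bytes.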
Fixes: b005255e12a3 ("KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs", 2014-01-08) Cc: stable@vger.kernel.org # v3.14+ Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 45 ++++++++++++++++++------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 4e4390564276..4888dd494604 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -44,6 +44,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define NAPPING_CEDE 1 #define NAPPING_NOVCPU 2 +/* Stack frame offsets for kvmppc_hv_entry */ +#define SFS 144 +#define STACK_SLOT_TRAP (SFS-4) +#define STACK_SLOT_TID (SFS-16) +#define STACK_SLOT_PSSCR (SFS-24) +#define STACK_SLOT_PID (SFS-32) +#define STACK_SLOT_IAMR (SFS-40) +#define STACK_SLOT_CIABR (SFS-48) +#define STACK_SLOT_DAWR (SFS-56) +#define STACK_SLOT_DAWRX (SFS-64) + /* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. @@ -328,10 +339,10 @@ kvm_novcpu_exit: bl kvmhv_accumulate_time #endif 13: mr r3, r12 - stw r12, 112-4(r1) + stw r12, STACK_SLOT_TRAP(r1) bl kvmhv_commence_exit nop - lwz r12, 112-4(r1) + lwz r12, STACK_SLOT_TRAP(r1) b kvmhv_switch_to_host /* @@ -554,12 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) * * *****************************************************************************/ -/* Stack frame offsets */ -#define STACK_SLOT_TID (112-16) -#define STACK_SLOT_PSSCR (112-24) -#define STACK_SLOT_PID (112-32) -#define STACK_SLOT_IAMR (112-40) - .global kvmppc_hv_entry kvmppc_hv_entry: @@ -575,7 +580,7 @@ kvmppc_hv_entry: */ mflr r0 std r0, PPC_LR_STKOFF(r1) - stdu r1, -112(r1) + stdu r1, -SFS(r1) /* Save R1 in the PACA */ std r1, HSTATE_HOST_R1(r13) @@ -765,6 +770,14 @@ BEGIN_FTR_SECTION std r7, STACK_SLOT_PID(r1) std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) +BEGIN_FTR_SECTION + mfspr r5, SPRN_CIABR + mfspr r6, SPRN_DAWR + mfspr r7, SPRN_DAWRX + std r5, STACK_SLOT_CIABR(r1) + std r6, STACK_SLOT_DAWR(r1) + std r7, STACK_SLOT_DAWRX(r1) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) BEGIN_FTR_SECTION /* Set partition DABR */ @@ -1518,8 +1531,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) * set by the guest could disrupt the host. */ li r0, 0 - mtspr SPRN_CIABR, r0 - mtspr SPRN_DAWRX, r0 mtspr SPRN_PSPB, r0 mtspr SPRN_WORT, r0 BEGIN_FTR_SECTION @@ -1684,6 +1695,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ptesync /* Restore host values of some registers */ +BEGIN_FTR_SECTION + ld r5, STACK_SLOT_CIABR(r1) + ld r6, STACK_SLOT_DAWR(r1) + ld r7, STACK_SLOT_DAWRX(r1) + mtspr SPRN_CIABR, r5 + mtspr SPRN_DAWR, r6 + mtspr SPRN_DAWRX, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) ld r6, STACK_SLOT_PSSCR(r1) @@ -1836,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) - ld r0, 112+PPC_LR_STKOFF(r1) - addi r1, r1, 112 + ld r0, SFS+PPC_LR_STKOFF(r1) + addi r1, r1, SFS mtlr r0 blr From 3d3efb68c19e539f0535c93a5258c1299270215f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 6 Jun 2017 14:35:30 +1000 Subject: [PATCH 293/436] KVM: PPC: Book3S HV: Ignore timebase offset on POWER9 DD1 POWER9 DD1 has an erratum where writing to the TBU40 register, which is used to apply an offset to the timebase, can cause the timebase to lose counts. 
This results in the timebase on some CPUs getting out of sync with other CPUs, which then results in misbehaviour of the timekeeping code. To work around the problem, we make KVM ignore the timebase offset for all guests on POWER9 DD1 machines. This means that live migration cannot be supported on POWER9 DD1 machines. Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index fd4d978d5257..8d1a365b8edc 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); break; case KVM_REG_PPC_TB_OFFSET: + /* + * POWER9 DD1 has an erratum where writing TBU40 causes + * the timebase to lose ticks. So we don't let the + * timebase offset be changed on P9 DD1. (It is + * initialized to zero.) + */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + break; /* round up to multiple of 2^24 */ vcpu->arch.vcore->tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24); From a093c92dc7f96a15de98ec8cfe38e6f7610a5969 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 14 Jun 2017 13:01:25 +1000 Subject: [PATCH 294/436] powerpc/debug: Add missing warn flag to WARN_ON's non-builtin path When trapped on WARN_ON(), report_bug() is expected to return BUG_TRAP_TYPE_WARN so the caller will increment NIP by 4 and continue. The __builtin_constant_p() path of the PPC's WARN_ON() calls (indirectly) __WARN_FLAGS() which has BUGFLAG_WARNING set, however the other branch does not which makes report_bug() report a bug rather than a warning. Fixes: f26dee15103f ("debug: Avoid setting BUGFLAG_WARNING twice") Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/bug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index f2c562a0a427..0151af6c2a50 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -104,7 +104,7 @@ "1: "PPC_TLNEI" %4,0\n" \ _EMIT_BUG_ENTRY \ : : "i" (__FILE__), "i" (__LINE__), \ - "i" (BUGFLAG_TAINT(TAINT_WARN)), \ + "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\ "i" (sizeof(struct bug_entry)), \ "r" (__ret_warn_on)); \ } \ From 54bd63570484167cb13edf81e31fff107b879981 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 15 Jun 2017 10:36:22 +0200 Subject: [PATCH 295/436] iommu/amd: Suppress IO_PAGE_FAULTs in kdump kernel When booting into a kdump kernel, suppress IO_PAGE_FAULTs by default for all devices. But allow the faults again when a domain is assigned to a device. 
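The following stand-alone C sketch models the device-table bookkeeping described above, assuming faults are suppressed by a per-device bit at boot and re-enabled when a domain is attached; attach_domain() and the sample values are made up for illustration, and only DTE_FLAG_SA mirrors the patch.

#include <stdio.h>
#include <stdint.h>

#define DTE_FLAG_SA (1ULL << 34)	/* "suppress all faults" bit, as in the patch */

/* mirrors the flags handling in set_dte_entry(): clear SA and the old
 * domain id, then install the new domain id */
static uint64_t attach_domain(uint64_t data1, uint16_t domid)
{
	data1 &= ~(DTE_FLAG_SA | 0xffffULL);
	data1 |= domid;
	return data1;
}

int main(void)
{
	/* kdump boot: faults suppressed, stale domain id left by the old kernel */
	uint64_t data1 = DTE_FLAG_SA | 0x0042ULL;

	data1 = attach_domain(data1, 0x0017);
	printf("DTE data[1] after attach: %#llx\n", (unsigned long long)data1);
	return 0;
}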
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 3 ++- drivers/iommu/amd_iommu_init.c | 9 +++++++++ drivers/iommu/amd_iommu_types.h | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 6498f5baa7ea..95ee360a5199 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2078,7 +2078,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) flags |= tmp; } - flags &= ~(0xffffUL); + + flags &= ~(DTE_FLAG_SA | 0xffffULL); flags |= domain->id; amd_iommu_dev_table[devid].data[1] = flags; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 3fa7e3b35507..cf7896550e75 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -1900,6 +1901,14 @@ static void init_device_table_dma(void) for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { set_dev_entry_bit(devid, DEV_ENTRY_VALID); set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); + /* + * In kdump kernels in-flight DMA from the old kernel might + * cause IO_PAGE_FAULTs. There are no reports that a kdump + * actually failed because of that, so just disable fault + * reporting in the hardware to get rid of the messages + */ + if (is_kdump_kernel()) + set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT); } } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 6960d7db2fab..294a409e283b 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -322,6 +322,7 @@ #define IOMMU_PTE_IW (1ULL << 62) #define DTE_FLAG_IOTLB (1ULL << 32) +#define DTE_FLAG_SA (1ULL << 34) #define DTE_FLAG_GV (1ULL << 55) #define DTE_FLAG_MASK (0x3ffULL << 32) #define DTE_GLX_SHIFT (56) From 92b0a1416be587b87c8ff489b6a74fd929048ca7 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 15 Jun 2017 08:20:35 -0500 Subject: [PATCH 296/436] objtool: Add fortify_panic as __noreturn function CONFIG_FORTIFY_SOURCE=y implements fortify_panic() as a __noreturn function, so objtool needs to know about it too. 
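For readers unfamiliar with why objtool cares, here is a small stand-alone C illustration of a noreturn callee; my_panic() is a hypothetical stand-in for fortify_panic(), not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

/* my_panic() is a hypothetical stand-in for fortify_panic() */
static void my_panic(const char *msg) __attribute__((noreturn));

static void my_panic(const char *msg)
{
	fprintf(stderr, "panic: %s\n", msg);
	abort();
}

static int checked_div(int a, int b)
{
	/* my_panic() never returns, so the compiler emits no fall-through
	 * path after the call; objtool needs the same knowledge when it
	 * checks control flow in the kernel build */
	if (b == 0)
		my_panic("division by zero");
	return a / b;
}

int main(void)
{
	printf("%d\n", checked_div(10, 2));
	return 0;
}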
Suggested-by: Daniel Micay Tested-by: Stephen Rothwell Signed-off-by: Kees Cook Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1497532835-32704-1-git-send-email-jpoimboe@redhat.com Signed-off-by: Ingo Molnar --- tools/objtool/builtin-check.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 282a60368b14..5f66697fe1e0 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -192,7 +192,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, "complete_and_exit", "kvm_spurious_fault", "__reiserfs_panic", - "lbug_with_loc" + "lbug_with_loc", + "fortify_panic", }; if (func->bind == STB_WEAK) From 35abcd4f9f303ac4f10f99b3f7e993e5f2e6fa37 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 16 Jun 2017 09:36:35 +0100 Subject: [PATCH 297/436] brcmfmac: fix uninitialized warning in brcmf_usb_probe_phase2() This fixes the following warning: drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c: In function 'brcmf_usb_probe_phase2': drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:1198:2: warning: 'devinfo' may be used uninitialized in this function [-Wmaybe-uninitialized] mutex_unlock(&devinfo->dev_init_lock); Fixes: 6d0507a777fb ("brcmfmac: add parameter to pass error code in firmware callback") Cc: Stephen Rothwell Reported-by: Kalle Valo Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 9ce3b55c3ffe..0eea48e73331 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1164,14 +1164,13 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, void *nvram, u32 nvlen) { struct brcmf_bus *bus = dev_get_drvdata(dev); - struct brcmf_usbdev_info *devinfo; + struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; if (ret) goto error; brcmf_dbg(USB, "Start fw downloading\n"); - devinfo = bus->bus_priv.usb->devinfo; ret = check_file(fw->data); if (ret < 0) { brcmf_err("invalid firmware\n"); From 20223f0f39ea9d31ece08f04ac79f8c4e8d98246 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jun 2017 11:08:24 +0200 Subject: [PATCH 298/436] fs: pass on flags in compat_writev Fixes: 793b80ef14af ("vfs: pass a flags argument to vfs_readv/vfs_writev") Signed-off-by: Christoph Hellwig Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- fs/read_write.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/read_write.c b/fs/read_write.c index 47c1d4484df9..19d4d88fa285 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1285,7 +1285,7 @@ static size_t compat_writev(struct file *file, if (!(file->f_mode & FMODE_CAN_WRITE)) goto out; - ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0); + ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags); out: if (ret > 0) From a9f8553e935f26cb5447f67e280946b0923cd2dc Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 1 Jun 2017 16:18:15 +0530 Subject: [PATCH 299/436] powerpc/kprobes: Pause function_graph tracing during jprobes handling This fixes a crash when function_graph and jprobes are used together. 
This is essentially commit 237d28db036e ("ftrace/jprobes/x86: Fix conflict between jprobes and function graph tracing"), but for powerpc. Jprobes breaks function_graph tracing since the jprobe hook needs to use jprobe_return(), which never returns back to the hook, but instead to the original jprobe'd function. The solution is to momentarily pause function_graph tracing before invoking the jprobe hook and re-enable it when returning back to the original jprobe'd function. Fixes: 6794c78243bf ("powerpc64: port of the function graph tracer") Cc: stable@vger.kernel.org # v2.6.30+ Signed-off-by: Naveen N. Rao Acked-by: Masami Hiramatsu Acked-by: Steven Rostedt (VMware) Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/kprobes.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index fc4343514bed..5075a4d6f1d7 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -617,6 +617,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); #endif + /* + * jprobes use jprobe_return() which skips the normal return + * path of the function, and this messes up the accounting of the + * function graph tracer. + * + * Pause function graph tracing while performing the jprobe function. + */ + pause_graph_tracing(); + return 1; } NOKPROBE_SYMBOL(setjmp_pre_handler); @@ -642,6 +651,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) * saved regs... */ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); + /* It's OK to start function graph tracing again */ + unpause_graph_tracing(); preempt_enable_no_resched(); return 1; } From a4979a7e71eb8da976cbe4a0a1fa50636e76b04f Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 1 Jun 2017 16:18:16 +0530 Subject: [PATCH 300/436] powerpc/ftrace: Pass the correct stack pointer for DYNAMIC_FTRACE_WITH_REGS For DYNAMIC_FTRACE_WITH_REGS, we should be passing-in the original set of registers in pt_regs, to capture the state _before_ ftrace_caller. However, we are instead passing the stack pointer *after* allocating a stack frame in ftrace_caller. Fix this by saving the proper value of r1 in pt_regs. Also, use SAVE_10GPRS() to simplify the code. Fixes: 153086644fd1 ("powerpc/ftrace: Add support for -mprofile-kernel ftrace ABI") Cc: stable@vger.kernel.org # v4.6+ Signed-off-by: Naveen N. 
Rao Signed-off-by: Michael Ellerman --- .../powerpc/kernel/trace/ftrace_64_mprofile.S | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 7c933a99f5d5..fa0921410fa4 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller) stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save all gprs to pt_regs */ - SAVE_8GPRS(0,r1) - SAVE_8GPRS(8,r1) - SAVE_8GPRS(16,r1) - SAVE_8GPRS(24,r1) + SAVE_GPR(0, r1) + SAVE_10GPRS(2, r1) + SAVE_10GPRS(12, r1) + SAVE_10GPRS(22, r1) + + /* Save previous stack pointer (r1) */ + addi r8, r1, SWITCH_FRAME_SIZE + std r8, GPR1(r1) /* Load special regs for save below */ mfmsr r8 @@ -103,10 +107,10 @@ ftrace_call: #endif /* Restore gprs */ - REST_8GPRS(0,r1) - REST_8GPRS(8,r1) - REST_8GPRS(16,r1) - REST_8GPRS(24,r1) + REST_GPR(0,r1) + REST_10GPRS(2,r1) + REST_10GPRS(12,r1) + REST_10GPRS(22,r1) /* Restore possibly modified LR */ ld r0, _LINK(r1) From c05b8c4474c03026aaa7f8872e78369f69f1bb08 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 1 Jun 2017 16:18:17 +0530 Subject: [PATCH 301/436] powerpc/kprobes: Skip livepatch_handler() for jprobes ftrace_caller() depends on a modified regs->nip to detect if a certain function has been livepatched. However, with KPROBES_ON_FTRACE, it is possible for regs->nip to have been modified by the kprobes pre_handler (jprobes, for instance). In this case, we do not want to invoke the livepatch_handler so as not to consume the livepatch stack. To distinguish between the two (kprobes and livepatch), we check if there is an active kprobe on the current function. If there is, then we know for sure that it must have modified the NIP as we don't support livepatching a kprobe'd function. In this case, we simply skip the livepatch_handler and branch to the new NIP. Otherwise, the livepatch_handler is invoked. Fixes: ead514d5fb30 ("powerpc/kprobes: Add support for KPROBES_ON_FTRACE") Signed-off-by: Naveen N. Rao Reviewed-by: Masami Hiramatsu Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kprobes.h | 1 + arch/powerpc/kernel/kprobes.c | 6 +++ .../powerpc/kernel/trace/ftrace_64_mprofile.S | 39 ++++++++++++++++--- 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index a83821f33ea3..8814a7249ceb 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_handler(struct pt_regs *regs); extern int kprobe_post_handler(struct pt_regs *regs); +extern int is_current_kprobe_addr(unsigned long addr); #ifdef CONFIG_KPROBES_ON_FTRACE extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 5075a4d6f1d7..01addfb0ed0a 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; +int is_current_kprobe_addr(unsigned long addr) +{ + struct kprobe *p = kprobe_running(); + return (p && (unsigned long)p->addr == addr) ? 
1 : 0; +} + bool arch_within_kprobe_blacklist(unsigned long addr) { return (addr >= (unsigned long)__kprobes_text_start && diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index fa0921410fa4..c98e90b4ea7b 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -99,13 +99,39 @@ ftrace_call: bl ftrace_stub nop - /* Load ctr with the possibly modified NIP */ - ld r3, _NIP(r1) - mtctr r3 + /* Load the possibly modified NIP */ + ld r15, _NIP(r1) + #ifdef CONFIG_LIVEPATCH - cmpd r14,r3 /* has NIP been altered? */ + cmpd r14, r15 /* has NIP been altered? */ #endif +#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE) + /* NIP has not been altered, skip over further checks */ + beq 1f + + /* Check if there is an active kprobe on us */ + subi r3, r14, 4 + bl is_current_kprobe_addr + nop + + /* + * If r3 == 1, then this is a kprobe/jprobe. + * else, this is livepatched function. + * + * The conditional branch for livepatch_handler below will use the + * result of this comparison. For kprobe/jprobe, we just need to branch to + * the new NIP, not call livepatch_handler. The branch below is bne, so we + * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want + * CR0[EQ] = (r3 == 1). + */ + cmpdi r3, 1 +1: +#endif + + /* Load CTR with the possibly modified NIP */ + mtctr r15 + /* Restore gprs */ REST_GPR(0,r1) REST_10GPRS(2,r1) @@ -123,7 +149,10 @@ ftrace_call: addi r1, r1, SWITCH_FRAME_SIZE #ifdef CONFIG_LIVEPATCH - /* Based on the cmpd above, if the NIP was altered handle livepatch */ + /* + * Based on the cmpd or cmpdi above, if the NIP was altered and we're + * not on a kprobe/jprobe, then handle livepatch. + */ bne- livepatch_handler #endif From d89ba5353f301971dd7d2f9fdf25c4432728f38e Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 14 Jun 2017 00:12:00 +0530 Subject: [PATCH 302/436] powerpc/64s: Handle data breakpoints in Radix mode On Power9, trying to use data breakpoints throws the splat shown below. This is because the check for a data breakpoint in DSISR is in do_hash_page(), which is not called when in Radix mode. Unable to handle kernel paging request for data at address 0xc000000000e19218 Faulting instruction address: 0xc0000000001155e8 cpu 0x0: Vector: 300 (Data Access) at [c0000000ef1e7b20] pc: c0000000001155e8: find_pid_ns+0x48/0xe0 lr: c000000000116ac4: find_task_by_vpid+0x44/0x90 sp: c0000000ef1e7da0 msr: 9000000000009033 dar: c000000000e19218 dsisr: 400000 Move the check to handle_page_fault() so as to catch data breakpoints in both Hash and Radix MMU modes. We have to change the check in do_hash_page() against 0xa410 to use 0xa450, so as to include the value of (DSISR_DABRMATCH << 16). There are two sites that call handle_page_fault() when in Radix, both already pass DSISR in r4. Fixes: caca285e5ab4 ("powerpc/mm/radix: Use STD_MMU_64 to properly isolate hash related code") Cc: stable@vger.kernel.org # v4.7+ Reported-by: Shriya R. Kulkarni Signed-off-by: Naveen N. 
Rao [mpe: Fix the fall-through case on hash, we need to reload DSISR] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/exceptions-64s.S | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index ae418b85c17c..b886795060fd 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1411,10 +1411,8 @@ USE_TEXT_SECTION() .balign IFETCH_ALIGN_BYTES do_hash_page: #ifdef CONFIG_PPC_STD_MMU_64 - andis. r0,r4,0xa410 /* weird error? */ + andis. r0,r4,0xa450 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ - andis. r0,r4,DSISR_DABRMATCH@h - bne- handle_dabr_fault CURRENT_THREAD_INFO(r11, r1) lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ @@ -1438,11 +1436,16 @@ do_hash_page: /* Error */ blt- 13f + + /* Reload DSISR into r4 for the DABR check below */ + ld r4,_DSISR(r1) #endif /* CONFIG_PPC_STD_MMU_64 */ /* Here we have a page fault that hash_page can't handle. */ handle_page_fault: -11: ld r4,_DAR(r1) +11: andis. r0,r4,DSISR_DABRMATCH@h + bne- handle_dabr_fault + ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_page_fault From bf05fc25f268cd62f147f368fe65ad3e5b04fe9f Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 15 Jun 2017 19:16:48 +0530 Subject: [PATCH 303/436] powerpc/perf: Fix oops when kthread execs user process When a kthread calls call_usermodehelper() the steps are: 1. allocate current->mm 2. load_elf_binary() 3. populate current->thread.regs While doing this, interrupts are not disabled. If there is a perf interrupt in the middle of this process (i.e. step 1 has completed but not yet reached to step 3) and if perf tries to read userspace regs, kernel oops with following log: Unable to handle kernel paging request for data at address 0x00000000 Faulting instruction address: 0xc0000000000da0fc ... Call Trace: perf_output_sample_regs+0x6c/0xd0 perf_output_sample+0x4e4/0x830 perf_event_output_forward+0x64/0x90 __perf_event_overflow+0x8c/0x1e0 record_and_restart+0x220/0x5c0 perf_event_interrupt+0x2d8/0x4d0 performance_monitor_exception+0x54/0x70 performance_monitor_common+0x158/0x160 --- interrupt: f01 at avtab_search_node+0x150/0x1a0 LR = avtab_search_node+0x100/0x1a0 ... load_elf_binary+0x6e8/0x15a0 search_binary_handler+0xe8/0x290 do_execveat_common.isra.14+0x5f4/0x840 call_usermodehelper_exec_async+0x170/0x210 ret_from_kernel_thread+0x5c/0x7c Fix it by setting abi to PERF_SAMPLE_REGS_ABI_NONE when userspace pt_regs are not set. Fixes: ed4a4ef85cf5 ("powerpc/perf: Add support for sampling interrupt register state") Cc: stable@vger.kernel.org # v4.7+ Signed-off-by: Ravi Bangoria Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman --- arch/powerpc/perf/perf_regs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index cbd82fde5770..09ceea6175ba 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs_user_copy) { regs_user->regs = task_pt_regs(current); - regs_user->abi = perf_reg_abi(current); + regs_user->abi = (regs_user->regs) ? 
perf_reg_abi(current) : + PERF_SAMPLE_REGS_ABI_NONE; } From 469f87e158628fe66dcbbce9dd5e7b7acfe934a9 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Thu, 15 Jun 2017 10:29:29 +0800 Subject: [PATCH 304/436] ip_tunnel: fix potential issue in ip_tunnel_rcv When ip_tunnel_rcv fails, the tun_dst won't be freed, so call dst_release to free it in error code path. Fixes: 2e15ea390e6f ("ip_gre: Add support to collect tunnel metadata.") Acked-by: Eric Dumazet Acked-by: Pravin B Shelar Tested-by: Zhang Shengju Signed-off-by: Haishuang Yan Signed-off-by: David S. Miller --- net/ipv4/ip_tunnel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index b436d0775631..129d1a3616f8 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, return 0; drop: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); kfree_skb(skb); return 0; } From f1925ca50deb48eddafc01bc12c2a17bfbf54425 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Thu, 15 Jun 2017 10:29:30 +0800 Subject: [PATCH 305/436] ip6_tunnel: fix potential issue in __ip6_tnl_rcv When __ip6_tnl_rcv fails, the tun_dst won't be freed, so call dst_release to free it in error code path. Fixes: 8d79266bc48c ("ip6_tunnel: add collect_md mode to IPv6 tunnels") CC: Alexei Starovoitov Tested-by: Zhang Shengju Signed-off-by: Haishuang Yan Signed-off-by: David S. Miller --- net/ipv6/ip6_tunnel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index c3581973f5d7..035c0496b92a 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -858,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, return 0; drop: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); kfree_skb(skb); return 0; } From 9126cbbacecb8917bd0418809ef1d26616b2061e Mon Sep 17 00:00:00 2001 From: Milian Wolff Date: Fri, 2 Jun 2017 16:37:53 +0200 Subject: [PATCH 306/436] perf unwind: Report module before querying isactivation in dwfl unwind The PC returned by dwfl_frame_pc() may map into a not-yet-reported module. We have to report it before we continue unwinding. But when we query for the isactivation flag in dwfl_frame_pc, libdw will actually do one more unwinding step internally which can then break and lead to missed frames or broken stacks. 
With libunwind we get e.g.: ~~~~~ heaptrack_gui 2228 135073.400474: 613969 cycles: 108c8e [unknown] (/usr/lib/libQt5Core.so.5.8.0) 1093bc [unknown] (/usr/lib/libQt5Core.so.5.8.0) 109e7b QLocale::QLocale (/usr/lib/libQt5Core.so.5.8.0) 1470ff [unknown] (/usr/lib/libQt5Core.so.5.8.0) 147f67 QSystemLocale::query (/usr/lib/libQt5Core.so.5.8.0) 109fbf QLocalePrivate::updateSystemPrivate (/usr/lib/libQt5Core.so.5.8.0) 10aa27 QLocale::QLocale (/usr/lib/libQt5Core.so.5.8.0) 1e02c3 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 2113bb [unknown] (/usr/lib/libQt5Core.so.5.8.0) 211505 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 1b5df0 QFileInfo::exists (/usr/lib/libQt5Core.so.5.8.0) 92eb2 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 93423 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 93d2a QLibraryInfo::location (/usr/lib/libQt5Core.so.5.8.0) 2170af [unknown] (/usr/lib/libQt5Core.so.5.8.0) 297c53 QCoreApplicationPrivate::init (/usr/lib/libQt5Core.so.5.8.0) f7cde QGuiApplicationPrivate::init (/usr/lib/libQt5Gui.so.5.8.0) 1589e8 QApplicationPrivate::init (/usr/lib/libQt5Widgets.so.5.8.0) 78622 main (/home/milian/projects/compiled/other/bin/heaptrack_gui) 20439 __libc_start_main (/usr/lib/libc-2.25.so) 78299 _start (/home/milian/projects/compiled/other/bin/heaptrack_gui) heaptrack_gui 2228 135073.401156: 569521 cycles: 131633 QString::endsWith (/usr/lib/libQt5Core.so.5.8.0) 1a0701 QDir::cleanPath (/usr/lib/libQt5Core.so.5.8.0) 21b82d [unknown] (/usr/lib/libQt5Core.so.5.8.0) 1b3727 QFileInfo::canonicalFilePath (/usr/lib/libQt5Core.so.5.8.0) 2780c7 QFactoryLoader::update (/usr/lib/libQt5Core.so.5.8.0) 279525 QFactoryLoader::QFactoryLoader (/usr/lib/libQt5Core.so.5.8.0) e5bd0 QPlatformIntegrationFactory::create (/usr/lib/libQt5Gui.so.5.8.0) f5a1c QGuiApplicationPrivate::createPlatformIntegration (/usr/lib/libQt5Gui.so.5.8.0) f650c QGuiApplicationPrivate::createEventDispatcher (/usr/lib/libQt5Gui.so.5.8.0) 298524 QCoreApplicationPrivate::init (/usr/lib/libQt5Core.so.5.8.0) f7cde QGuiApplicationPrivate::init (/usr/lib/libQt5Gui.so.5.8.0) 1589e8 QApplicationPrivate::init (/usr/lib/libQt5Widgets.so.5.8.0) 78622 main (/home/milian/projects/compiled/other/bin/heaptrack_gui) 20439 __libc_start_main (/usr/lib/libc-2.25.so) 78299 _start (/home/milian/projects/compiled/other/bin/heaptrack_gui) ~~~~~ Note the two frames 1589e8 and 78622 in the first sample. These are missing when unwinding with libdw. 
The second sample's breakage is more obvious: ~~~~~ heaptrack_gui 2228 135073.400474: 613969 cycles: 108c8e [unknown] (/usr/lib/libQt5Core.so.5.8.0) 1093bc [unknown] (/usr/lib/libQt5Core.so.5.8.0) 109e7b QLocale::QLocale (/usr/lib/libQt5Core.so.5.8.0) 1470ff [unknown] (/usr/lib/libQt5Core.so.5.8.0) 147f67 QSystemLocale::query (/usr/lib/libQt5Core.so.5.8.0) 109fbf QLocalePrivate::updateSystemPrivate (/usr/lib/libQt5Core.so.5.8.0) 10aa27 QLocale::QLocale (/usr/lib/libQt5Core.so.5.8.0) 1e02c3 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 2113bb [unknown] (/usr/lib/libQt5Core.so.5.8.0) 211505 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 1b5df0 QFileInfo::exists (/usr/lib/libQt5Core.so.5.8.0) 92eb2 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 93423 [unknown] (/usr/lib/libQt5Core.so.5.8.0) 93d2a QLibraryInfo::location (/usr/lib/libQt5Core.so.5.8.0) 2170af [unknown] (/usr/lib/libQt5Core.so.5.8.0) 297c53 QCoreApplicationPrivate::init (/usr/lib/libQt5Core.so.5.8.0) f7cde QGuiApplicationPrivate::init (/usr/lib/libQt5Gui.so.5.8.0) 20439 __libc_start_main (/usr/lib/libc-2.25.so) 78299 _start (/home/milian/projects/compiled/other/bin/heaptrack_gui) heaptrack_gui 2228 135073.401156: 569521 cycles: 131633 QString::endsWith (/usr/lib/libQt5Core.so.5.8.0) 1a0701 QDir::cleanPath (/usr/lib/libQt5Core.so.5.8.0) 21b82d [unknown] (/usr/lib/libQt5Core.so.5.8.0) 1b3727 QFileInfo::canonicalFilePath (/usr/lib/libQt5Core.so.5.8.0) 2780c7 QFactoryLoader::update (/usr/lib/libQt5Core.so.5.8.0) 279525 QFactoryLoader::QFactoryLoader (/usr/lib/libQt5Core.so.5.8.0) e5bd0 QPlatformIntegrationFactory::create (/usr/lib/libQt5Gui.so.5.8.0) 723dbf [unknown] ([unknown]) ~~~~~ This patch fixes this issue and the libdw unwinder mimicks the libunwind behavior more closely. Signed-off-by: Milian Wolff Acked-by: Jan Kratochvil Cc: Jiri Olsa Cc: Namhyung Kim Link: http://lkml.kernel.org/r/20170602143753.16907-2-milian.wolff@kdab.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/unwind-libdw.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index da45c4be5fb3..7755a5e0fe5e 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -178,6 +178,14 @@ frame_callback(Dwfl_Frame *state, void *arg) Dwarf_Addr pc; bool isactivation; + if (!dwfl_frame_pc(state, &pc, NULL)) { + pr_err("%s", dwfl_errmsg(-1)); + return DWARF_CB_ABORT; + } + + // report the module before we query for isactivation + report_module(pc, ui); + if (!dwfl_frame_pc(state, &pc, &isactivation)) { pr_err("%s", dwfl_errmsg(-1)); return DWARF_CB_ABORT; From 76371d2e3ad1f84426a30ebcd8c3b9b98f4c724f Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 16 Jun 2017 10:46:37 -0700 Subject: [PATCH 307/436] decnet: always not take dst->__refcnt when inserting dst into hash table In the existing dn_route.c code, dn_route_output_slow() takes dst->__refcnt before calling dn_insert_route() while dn_route_input_slow() does not take dst->__refcnt before calling dn_insert_route(). This makes the whole routing code very buggy. In dn_dst_check_expire(), dnrt_free() is called when rt expires. This makes the routes inserted by dn_route_output_slow() not able to be freed as the refcnt is not released. In dn_dst_gc(), dnrt_drop() is called to release rt which could potentially cause the dst->__refcnt to be dropped to -1. In dn_run_flush(), dst_free() is called to release all the dst. 
Again, it makes the dst inserted by dn_route_output_slow() not able to be released and also, it does not wait on the rcu and could potentially cause crash in the path where other users still refer to this dst. This patch makes sure both input and output path do not take dst->__refcnt before calling dn_insert_route() and also makes sure dnrt_free()/dst_free() is called when removing dst from the hash table. The only difference between those 2 calls is that dnrt_free() waits on the rcu while dst_free() does not. Signed-off-by: Wei Wang Acked-by: Martin KaFai Lau Signed-off-by: David S. Miller --- net/decnet/dn_route.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 4b9518a0d248..6f95612b4d32 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt) call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } -static inline void dnrt_drop(struct dn_route *rt) -{ - dst_release(&rt->dst); - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); -} - static void dn_dst_check_expire(unsigned long dummy) { int i; @@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops) } *rtp = rt->dst.dn_next; rt->dst.dn_next = NULL; - dnrt_drop(rt); + dnrt_free(rt); break; } spin_unlock_bh(&dn_rt_hash_table[i].lock); @@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou dst_use(&rth->dst, now); spin_unlock_bh(&dn_rt_hash_table[hash].lock); - dnrt_drop(rt); + dst_free(&rt->dst); *rp = rth; return 0; } @@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy) for(; rt; rt = next) { next = rcu_dereference_raw(rt->dst.dn_next); RCU_INIT_POINTER(rt->dst.dn_next, NULL); - dst_free((struct dst_entry *)rt); + dnrt_free(rt); } nothing_to_declare: @@ -1187,7 +1181,7 @@ make_route: if (dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); if (rt == NULL) goto e_nobufs; From 7258ae5c5a2ce2f5969e8b18b881be40ab55433d Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 16 Jun 2017 14:02:29 -0700 Subject: [PATCH 308/436] mm/memory-failure.c: use compound_head() flags for huge pages memory_failure() chooses a recovery action function based on the page flags. For huge pages it uses the tail page flags which don't have anything interesting set, resulting in: > Memory failure: 0x9be3b4: Unknown page state > Memory failure: 0x9be3b4: recovery action for unknown page: Failed Instead, save a copy of the head page's flags if this is a huge page, this means if there are no relevant flags for this tail page, we use the head pages flags instead. This results in the me_huge_page() recovery action being called: > Memory failure: 0x9b7969: recovery action for huge page: Delayed For hugepages that have not yet been allocated, this allows the hugepage to be dequeued. 
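The head-versus-tail selection can be pictured with the following stand-alone C sketch; struct page_model and flags_for_recovery() are simplified stand-ins, not kernel code, and the flag value is made up.

#include <stdio.h>

/* two-field stand-in for struct page, not the kernel structure */
struct page_model {
	unsigned long flags;
	struct page_model *head;	/* non-NULL for a tail page of a huge page */
};

/* mirrors the hunk below: for a huge page the interesting flags live on the
 * head page, so use those; otherwise use the page's own flags */
static unsigned long flags_for_recovery(const struct page_model *p)
{
	return p->head ? p->head->flags : p->flags;
}

int main(void)
{
	struct page_model head = { .flags = 0x8000ul, .head = NULL };
	struct page_model tail = { .flags = 0, .head = &head };

	printf("flags used for the tail page: %#lx\n", flags_for_recovery(&tail));
	return 0;
}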
Fixes: 524fca1e7356 ("HWPOISON: fix misjudgement of page_action() for errors on mlocked pages") Link: http://lkml.kernel.org/r/20170524130204.21845-1-james.morse@arm.com Signed-off-by: James Morse Tested-by: Punit Agrawal Acked-by: Punit Agrawal Acked-by: Naoya Horiguchi Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 342fac9ba89b..ecc183fd94f3 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) * page_remove_rmap() in try_to_unmap_one(). So to determine page status * correctly, we save a copy of the page flags at this time. */ - page_flags = p->flags; + if (PageHuge(p)) + page_flags = hpage->flags; + else + page_flags = p->flags; /* * unpoison always clear PG_hwpoison inside page lock From ef70762948dde012146926720b70e79736336764 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Fri, 16 Jun 2017 14:02:31 -0700 Subject: [PATCH 309/436] swap: cond_resched in swap_cgroup_prepare() I saw need_resched() warnings when swapping on large swapfile (TBs) because continuously allocating many pages in swap_cgroup_prepare() took too long. We already cond_resched when freeing page in swap_cgroup_swapoff(). Do the same for the page allocation. Link: http://lkml.kernel.org/r/20170604200109.17606-1-yuzhao@google.com Signed-off-by: Yu Zhao Acked-by: Michal Hocko Acked-by: Vladimir Davydov Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap_cgroup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index ac6318a064d3..3405b4ee1757 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type) if (!page) goto not_enough_page; ctrl->map[idx] = page; + + if (!(idx % SWAP_CLUSTER_MAX)) + cond_resched(); } return 0; not_enough_page: From 3c226c637b69104f6b9f1c6ec5b08d7b741b3229 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 16 Jun 2017 14:02:34 -0700 Subject: [PATCH 310/436] mm: numa: avoid waiting on freed migrated pages In do_huge_pmd_numa_page(), we attempt to handle a migrating thp pmd by waiting until the pmd is unlocked before we return and retry. However, we can race with migrate_misplaced_transhuge_page(): // do_huge_pmd_numa_page // migrate_misplaced_transhuge_page() // Holds 0 refs on page // Holds 2 refs on page vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); /* ... */ if (pmd_trans_migrating(*vmf->pmd)) { page = pmd_page(*vmf->pmd); spin_unlock(vmf->ptl); ptl = pmd_lock(mm, pmd); if (page_count(page) != 2)) { /* roll back */ } /* ... */ mlock_migrate_page(new_page, page); /* ... */ spin_unlock(ptl); put_page(page); put_page(page); // page freed here wait_on_page_locked(page); goto out; } This can result in the freed page having its waiters flag set unexpectedly, which trips the PAGE_FLAGS_CHECK_AT_PREP checks in the page alloc/free functions. This has been observed on arm64 KVM guests. We can avoid this by having do_huge_pmd_numa_page() take a reference on the page before dropping the pmd lock, mirroring what we do in __migration_entry_wait(). When we hit the race, migrate_misplaced_transhuge_page() will see the reference and abort the migration, as it may do today in other cases. 
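The pin-before-unlock pattern the fix introduces can be sketched in plain C as below; the *_model() helpers are single-threaded stand-ins for get_page_unless_zero()/put_page() and ignore the atomicity and locking of the real code.

#include <stdio.h>
#include <stdbool.h>

struct page_model {
	int refcount;	/* stand-in; the real count is atomic */
};

/* mirrors get_page_unless_zero(): take a reference only if the page is not
 * already on its way to being freed */
static bool get_page_unless_zero_model(struct page_model *p)
{
	if (p->refcount == 0)
		return false;
	p->refcount++;
	return true;
}

static void put_page_model(struct page_model *p)
{
	p->refcount--;
}

int main(void)
{
	struct page_model migrating = { .refcount = 2 };

	/* the fixed do_huge_pmd_numa_page() ordering: pin the page, then
	 * drop the pmd lock and wait, then release the pin */
	if (get_page_unless_zero_model(&migrating)) {
		/* spin_unlock(vmf->ptl); wait_on_page_locked(page); */
		put_page_model(&migrating);
	}
	printf("refcount after the wait: %d\n", migrating.refcount);
	return 0;
}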
Fixes: b8916634b77bffb2 ("mm: Prevent parallel splits during THP migration") Link: http://lkml.kernel.org/r/1497349722-6731-2-git-send-email-will.deacon@arm.com Signed-off-by: Mark Rutland Signed-off-by: Will Deacon Acked-by: Steve Capper Acked-by: Kirill A. Shutemov Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a84909cf20d3..88c6167f194d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) */ if (unlikely(pmd_trans_migrating(*vmf->pmd))) { page = pmd_page(*vmf->pmd); + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(vmf->ptl); wait_on_page_locked(page); + put_page(page); goto out; } @@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { + page_nid = -1; + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(vmf->ptl); wait_on_page_locked(page); - page_nid = -1; + put_page(page); goto out; } From 64c2b20301f62c697352c8028c569b1b2bdd8e82 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Fri, 16 Jun 2017 14:02:37 -0700 Subject: [PATCH 311/436] userfaultfd: shmem: handle coredumping in handle_userfault() Anon and hugetlbfs handle FOLL_DUMP set by get_dump_page() internally to __get_user_pages(). shmem as opposed has no special FOLL_DUMP handling there so handle_mm_fault() is invoked without mmap_sem and ends up calling handle_userfault() that isn't expecting to be invoked without mmap_sem held. This makes handle_userfault() fail immediately if invoked through shmem_vm_ops->fault during coredumping and solves the problem. The side effect is a BUG_ON with no lock held triggered by the coredumping process which exits. Only 4.11 is affected, pre-4.11 anon memory holes are skipped in __get_user_pages by checking FOLL_DUMP explicitly against empty pagetables (mm/gup.c:no_page_table()). It's zero cost as we already had a check for current->flags to prevent futex to trigger userfaults during exit (PF_EXITING). Link: http://lkml.kernel.org/r/20170615214838.27429-1-aarcange@redhat.com Signed-off-by: Andrea Arcangeli Reported-by: "Dr. David Alan Gilbert" Cc: [4.11+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/userfaultfd.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index f7555fc25877..1d622f276e3a 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) bool must_wait, return_to_userland; long blocking_state; - BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - ret = VM_FAULT_SIGBUS; + + /* + * We don't do userfault handling for the final child pid update. + * + * We also don't do userfault handling during + * coredumping. hugetlbfs has the special + * follow_hugetlb_page() to skip missing pages in the + * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with + * the no_page_table() helper in follow_page_mask(), but the + * shmem_vm_ops->fault method is invoked even during + * coredumping without mmap_sem and it ends up here. 
+ */ + if (current->flags & (PF_EXITING|PF_DUMPCORE)) + goto out; + + /* + * Coredumping runs without mmap_sem so we can only check that + * the mmap_sem is held, if PF_DUMPCORE was not set. + */ + WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem)); + ctx = vmf->vma->vm_userfaultfd_ctx.ctx; if (!ctx) goto out; @@ -360,12 +379,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) if (unlikely(ACCESS_ONCE(ctx->released))) goto out; - /* - * We don't do userfault handling for the final child pid update. - */ - if (current->flags & PF_EXITING) - goto out; - /* * Check that we can return VM_FAULT_RETRY. * From d7143e31259cb029e207619209b31aa7520f8e28 Mon Sep 17 00:00:00 2001 From: zhongjiang Date: Fri, 16 Jun 2017 14:02:40 -0700 Subject: [PATCH 312/436] mm: correct the comment when reclaimed pages exceed the scanned pages Commit e1587a494540 ("mm: vmpressure: fix sending wrong events on underflow") declared that reclaimed pages exceed the scanned pages due to the thp reclaim. That is incorrect because THP will be spilt to normal page and loop again, which will result in the scanned pages increment. [akpm@linux-foundation.org: tweak comment text] Link: http://lkml.kernel.org/r/1496824266-25235-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhongjiang Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmpressure.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 6063581f705c..ce0618bfa8d0 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, unsigned long pressure = 0; /* - * reclaimed can be greater than scanned in cases - * like THP, where the scanned is 1 and reclaimed - * could be 512 + * reclaimed can be greater than scanned for things such as reclaimed + * slab pages. shrink_node() just adds reclaimed pages without a + * related increment to scanned pages. 
*/ if (reclaimed >= scanned) goto out; From 23ac7cba73bb2c6e80f9cdebeb39dc3dad34ebb3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 16 Jun 2017 23:49:17 -0400 Subject: [PATCH 313/436] fix signedness of timestamps on ufs1 Signed-off-by: Al Viro --- fs/ufs/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 9f4590261134..7b1b810a8ab1 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -578,9 +578,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); - inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); - inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); - inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); + inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); + inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); + inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; From c0ef65d2928249e822b813beb41b6c1478c556ab Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 16 Jun 2017 23:54:47 -0400 Subject: [PATCH 314/436] ufs_iget(): fail with -ESTALE on deleted inode Signed-off-by: Al Viro --- fs/ufs/inode.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 7b1b810a8ab1..f36d6a53687d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -566,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) */ inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); - if (inode->i_nlink == 0) { - ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); - return -1; - } + if (inode->i_nlink == 0) + return -ESTALE; /* * Linux now has 32-bit uid and gid, so we can support EFT. @@ -614,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) */ inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); - if (inode->i_nlink == 0) { - ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); - return -1; - } + if (inode->i_nlink == 0) + return -ESTALE; /* * Linux now has 32-bit uid and gid, so we can support EFT. @@ -657,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino) struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; struct inode *inode; - int err; + int err = -EIO; UFSD("ENTER, ino %lu\n", ino); @@ -692,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino) err = ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); } - + brelse(bh); if (err) goto bad_inode; + inode->i_version++; ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; @@ -703,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino) ufs_set_inode_ops(inode); - brelse(bh); - UFSD("EXIT\n"); unlock_new_inode(inode); return inode; bad_inode: iget_failed(inode); - return ERR_PTR(-EIO); + return ERR_PTR(err); } static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) From 57db7e4a2d92c2d3dfbca4ef8057849b2682436b Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Tue, 13 Jun 2017 04:31:16 -0500 Subject: [PATCH 315/436] signal: Only reschedule timers on signals timers have sent Thomas Gleixner wrote: > The CRIU support added a 'feature' which allows a user space task to send > arbitrary (kernel) signals to itself. The changelog says: > > The kernel prevents sending of siginfo with positive si_code, because > these codes are reserved for kernel. I think we can allow a task to > send such a siginfo to itself. This operation should not be dangerous. > > Quite contrary to that claim, it turns out that it is outright dangerous > for signals with info->si_code == SI_TIMER. The following code sequence in > a user space task allows to crash the kernel: > > id = timer_create(CLOCK_XXX, ..... signo = SIGX); > timer_set(id, ....); > info->si_signo = SIGX; > info->si_code = SI_TIMER: > info->_sifields._timer._tid = id; > info->_sifields._timer._sys_private = 2; > rt_[tg]sigqueueinfo(..., SIGX, info); > sigemptyset(&sigset); > sigaddset(&sigset, SIGX); > rt_sigtimedwait(sigset, info); > > For timers based on CLOCK_PROCESS_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID this > results in a kernel crash because sigwait() dequeues the signal and the > dequeue code observes: > > info->si_code == SI_TIMER && info->_sifields._timer._sys_private != 0 > > which triggers the following callchain: > > do_schedule_next_timer() -> posix_cpu_timer_schedule() -> arm_timer() > > arm_timer() executes a list_add() on the timer, which is already armed via > the timer_set() syscall. That's a double list add which corrupts the posix > cpu timer list. As a consequence the kernel crashes on the next operation > touching the posix cpu timer list. > > Posix clocks which are internally implemented based on hrtimers are not > affected by this because hrtimer_start() can handle already armed timers > nicely, but it's a reliable way to trigger the WARN_ON() in > hrtimer_forward(), which complains about calling that function on an > already armed timer. This problem has existed since the posix timer code was merged into 2.5.63. A few releases earlier in 2.5.60 ptrace gained the ability to inject not just a signal (which linux has supported since 1.0) but the full siginfo of a signal. The core problem is that the code will reschedule in response to signals getting dequeued not just for signals the timers sent but for other signals that happen to a si_code of SI_TIMER. Avoid this confusion by testing to see if the queued signal was preallocated as all timer signals are preallocated, and so far only the timer code preallocates signals. Move the check for if a timer needs to be rescheduled up into collect_signal where the preallocation check must be performed, and pass the result back to dequeue_signal where the code reschedules timers. This makes it clear why the code cares about preallocated timers. Cc: stable@vger.kernel.org Reported-by: Thomas Gleixner History Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Reference: 66dd34ad31e5 ("signal: allow to send any siginfo to itself") Reference: 1669ce53e2ff ("Add PTRACE_GETSIGINFO and PTRACE_SETSIGINFO") Fixes: db8b50ba75f2 ("[PATCH] POSIX clocks & timers") Signed-off-by: "Eric W. 
Biederman" --- kernel/signal.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index ca92bcfeb322..45b4c1ffe14e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) return !tsk->ptrace; } -static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, + bool *resched_timer) { struct sigqueue *q, *first = NULL; @@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); + + *resched_timer = + (first->flags & SIGQUEUE_PREALLOC) && + (info->si_code == SI_TIMER) && + (info->si_sys_private); + __sigqueue_free(first); } else { /* @@ -548,12 +555,12 @@ still_pending: } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - siginfo_t *info) + siginfo_t *info, bool *resched_timer) { int sig = next_signal(pending, mask); if (sig) - collect_signal(sig, pending, info); + collect_signal(sig, pending, info, resched_timer); return sig; } @@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, */ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { + bool resched_timer = false; int signr; /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ - signr = __dequeue_signal(&tsk->pending, mask, info); + signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); if (!signr) { signr = __dequeue_signal(&tsk->signal->shared_pending, - mask, info); + mask, info, &resched_timer); #ifdef CONFIG_POSIX_TIMERS /* * itimer signal ? @@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) current->jobctl |= JOBCTL_STOP_DEQUEUED; } #ifdef CONFIG_POSIX_TIMERS - if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { + if (resched_timer) { /* * Release the siglock to ensure proper locking order * of timer locks outside of siglocks. Note, we leave From 77e9ce327d9b607cd6e57c0f4524a654dc59c4b1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 17 Jun 2017 15:44:06 -0400 Subject: [PATCH 316/436] ufs: fix the logics for tail relocation * original hysteresis loop got broken by typo back in 2002; now it never switches out of OPTTIME state. Fixed. * critical levels for switching from OPTTIME to OPTSPACE and back ought to be calculated once, at mount time. * we should use mul_u64_u32_div() for those calculations, now that ->s_dsize is 64bit. * to quote Kirk McKusick (in 1995 FreeBSD commit message): The threshold for switching from time-space and space-time is too small when minfree is 5%...so make it stay at space in this case. 
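To see what the new mount-time thresholds evaluate to, here is a stand-alone C sketch that plugs sample numbers into the formulas added below; s_dsize and s_minfree are illustrative values, and mul_u64_u32_div() is a simplified stand-in for the kernel helper.

#include <stdio.h>
#include <stdint.h>

/* user-space stand-in for the kernel's mul_u64_u32_div(); the real helper
 * avoids overflow of the 64x32-bit intermediate, which this sketch ignores */
static uint64_t mul_u64_u32_div(uint64_t a, uint32_t mul, uint32_t div)
{
	return a * mul / div;
}

int main(void)
{
	uint64_t s_dsize = 1000000;	/* data fragments, illustrative value */
	uint32_t s_minfree = 8;		/* percent, illustrative value */
	uint64_t s_root_blocks = mul_u64_u32_div(s_dsize, s_minfree, 100);
	uint64_t s_time_to_space, s_space_to_time;

	if (s_minfree <= 5) {
		s_time_to_space = ~0ULL;	/* never leave OPTSPACE */
		s_space_to_time = 0;
	} else {
		s_time_to_space = s_root_blocks / 2 + 1;
		s_space_to_time = mul_u64_u32_div(s_dsize, s_minfree - 2, 100) - 1;
	}

	printf("OPTSPACE -> OPTTIME once cs_nffree drops below %llu\n",
	       (unsigned long long)s_space_to_time);
	printf("OPTTIME  -> OPTSPACE once cs_nffree exceeds  %llu\n",
	       (unsigned long long)s_time_to_space);
	return 0;
}

With s_minfree at or below 5 the thresholds collapse so the filesystem simply stays in OPTSPACE, matching the McKusick quote above.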
Signed-off-by: Al Viro --- fs/ufs/balloc.c | 22 ++++++---------------- fs/ufs/super.c | 9 +++++++++ fs/ufs/ufs_fs.h | 2 ++ 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 0315fea1d589..f80be4c5df9d 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -455,24 +455,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, /* * allocate new block and move data */ - switch (fs32_to_cpu(sb, usb1->fs_optim)) { - case UFS_OPTSPACE: + if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) { request = newcount; - if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree - > uspi->s_dsize * uspi->s_minfree / (2 * 100)) - break; - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); - break; - default: - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); - - case UFS_OPTTIME: + if (uspi->cs_total.cs_nffree < uspi->s_space_to_time) + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); + } else { request = uspi->s_fpb; - if (uspi->cs_total.cs_nffree < uspi->s_dsize * - (uspi->s_minfree - 2) / 100) - break; - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); - break; + if (uspi->cs_total.cs_nffree > uspi->s_time_to_space) + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); } result = ufs_alloc_fragments (inode, cgno, goal, request, err); if (result) { diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 34656c7a8e22..f211b662dd92 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1211,6 +1211,15 @@ magic_found: uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, uspi->s_minfree, 100); + if (uspi->s_minfree <= 5) { + uspi->s_time_to_space = ~0ULL; + uspi->s_space_to_time = 0; + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); + } else { + uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1; + uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize, + uspi->s_minfree - 2, 100) - 1; + } /* * Compute another frequently used values diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h index 823d55a37586..150eef6f1233 100644 --- a/fs/ufs/ufs_fs.h +++ b/fs/ufs/ufs_fs.h @@ -792,6 +792,8 @@ struct ufs_sb_private_info { __s32 fs_magic; /* filesystem magic */ unsigned int s_dirblksize; __u64 s_root_blocks; + __u64 s_time_to_space; + __u64 s_space_to_time; }; /* From e41b1355508debe45fda33ef8c03ff3ba5d206b9 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 13 Jun 2017 20:56:44 +0300 Subject: [PATCH 317/436] virtio_balloon: disable VIOMMU support virtio balloon bypasses the DMA API entirely so does not support the VIOMMU right now. It's not clear we need that support, for now let's just make sure we don't pretend to support it. Cc: stable@vger.kernel.org Cc: Wei Wang Fixes: 1a937693993f ("virtio: new feature to detect IOMMU device quirk") Signed-off-by: Michael S. 
Tsirkin Acked-by: Jason Wang --- drivers/virtio/virtio_balloon.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 408c174ef0d5..22caf808bfab 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -663,6 +663,12 @@ static int virtballoon_restore(struct virtio_device *vdev) } #endif +static int virtballoon_validate(struct virtio_device *vdev) +{ + __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); + return 0; +} + static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_F_STATS_VQ, @@ -675,6 +681,7 @@ static struct virtio_driver virtio_balloon_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, + .validate = virtballoon_validate, .probe = virtballoon_probe, .remove = virtballoon_remove, .config_changed = virtballoon_changed, From 779f19ac9d5858a2c159030c0c166f7da46b74ae Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 18 Jun 2017 15:10:26 -0700 Subject: [PATCH 318/436] Input: soc_button_array - fix leaking the ACPI button descriptor buffer We are passing a buffer with ACPI_ALLOCATE_BUFFER set to acpi_evaluate_object, so we must free it when we are done with it. Signed-off-by: Hans de Goede Signed-off-by: Dmitry Torokhov --- drivers/input/misc/soc_button_array.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index e37d37273182..f600f3a7a3c6 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev) if (!btns_desc) { dev_err(dev, "ACPI Button Descriptors not found\n"); - return ERR_PTR(-ENODEV); + button_info = ERR_PTR(-ENODEV); + goto out; } /* The first package describes the collection */ @@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev) } if (collection_uid == -1) { dev_err(dev, "Invalid Button Collection Descriptor\n"); - return ERR_PTR(-ENODEV); + button_info = ERR_PTR(-ENODEV); + goto out; } /* There are package.count - 1 buttons + 1 terminating empty entry */ button_info = devm_kcalloc(dev, btns_desc->package.count, sizeof(*button_info), GFP_KERNEL); - if (!button_info) - return ERR_PTR(-ENOMEM); + if (!button_info) { + button_info = ERR_PTR(-ENOMEM); + goto out; + } /* Parse the button descriptors */ for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) { if (soc_button_parse_btn_desc(dev, &btns_desc->package.elements[i], collection_uid, - &button_info[btn])) - return ERR_PTR(-ENODEV); + &button_info[btn])) { + button_info = ERR_PTR(-ENODEV); + goto out; + } } +out: + kfree(buf.pointer); return button_info; } From 46f8cd9d2fc1e4e8b82b53a0007f6c92e80c930b Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Sat, 17 Jun 2017 11:38:05 +0800 Subject: [PATCH 319/436] ip6_tunnel: Correct tos value in collect_md mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Same as ip_gre, geneve and vxlan, use key->tos as traffic class value. CC: Peter Dawson Fixes: 0e9a709560db ("ip6_tunnel, ip6_gre: fix setting of DSCP on encapsulated packets”) Signed-off-by: Haishuang Yan Acked-by: Peter Dawson Signed-off-by: David S. 
Miller --- net/ipv6/ip6_tunnel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 035c0496b92a..8c6c3c8e7eef 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1248,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPIP; fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; - dsfield = ip6_tclass(key->label); + dsfield = key->tos; } else { if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; @@ -1319,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPV6; fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; - dsfield = ip6_tclass(key->label); + dsfield = key->tos; } else { offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ From a21ef715fbb8210c50b1d684145f8acdf2339596 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 15 Jun 2017 14:11:29 +0100 Subject: [PATCH 320/436] drm/i915: Differentiate between sw write location into ring and last hw read We need to keep track of the last location we ask the hw to read up to (RING_TAIL) separately from our last write location into the ring, so that in the event of a GPU reset we do not tell the HW to proceed into a partially written request (which can happen if that request is waiting for an external signal before being executed). v2: Refactor intel_ring_reset() (Mika) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100144 Testcase: igt/gem_exec_fence/await-hang Fixes: 821ed7df6e2a ("drm/i915: Update reset path to fix incomplete requests") Fixes: d55ac5bf97c6 ("drm/i915: Defer transfer onto execution timeline to actual hw submission") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20170425130049.26147-1-chris@chris-wilson.co.uk Reviewed-by: Mika Kuoppala (cherry picked from commit e6ba9992de6c63fe86c028b4876338e1cb7dac34) Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20170615131129.3061-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_request.c | 2 +- drivers/gpu/drm/i915/i915_guc_submission.c | 4 +-- drivers/gpu/drm/i915/intel_lrc.c | 6 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 41 ++++++++++++++-------- drivers/gpu/drm/i915/intel_ringbuffer.h | 19 ++++++++-- 5 files changed, 48 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 5ddbc9499775..a74d0ac737cb 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, * GPU processing the request, we never over-estimate the * position of the head. 
*/ - req->head = req->ring->tail; + req->head = req->ring->emit; /* Check that we didn't interrupt ourselves with a new request */ GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 1642fff9cf13..ab5140ba108d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client, GEM_BUG_ON(freespace < wqi_size); /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = rq->tail; - assert_ring_tail_valid(rq->ring, rq->tail); - tail >>= 3; + tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index dac4e003c1f3..62f44d3e7c43 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; - assert_ring_tail_valid(rq->ring, rq->tail); - reg_state[CTX_RING_TAIL+1] = rq->tail; + reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); /* True 32b PPGTT with dynamic page allocation: update PDP * registers and point the unallocated PDPs to scratch page. @@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) ce->state->obj->mm.dirty = true; i915_gem_object_unpin_map(ce->state->obj); - ce->ring->head = ce->ring->tail = 0; - intel_ring_update_space(ce->ring); + intel_ring_reset(ce->ring, 0); } } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 66a2b8b83972..513a0f4b469b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size) void intel_ring_update_space(struct intel_ring *ring) { - ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); + ring->space = __intel_ring_space(ring->head, ring->emit, ring->size); } static int @@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request) i915_gem_request_submit(request); - assert_ring_tail_valid(request->ring, request->tail); - I915_WRITE_TAIL(request->engine, request->tail); + I915_WRITE_TAIL(request->engine, + intel_ring_set_tail(request->ring, request->tail)); } static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) @@ -1316,11 +1316,23 @@ err: return PTR_ERR(addr); } +void intel_ring_reset(struct intel_ring *ring, u32 tail) +{ + GEM_BUG_ON(!list_empty(&ring->request_list)); + ring->tail = tail; + ring->head = tail; + ring->emit = tail; + intel_ring_update_space(ring); +} + void intel_ring_unpin(struct intel_ring *ring) { GEM_BUG_ON(!ring->vma); GEM_BUG_ON(!ring->vaddr); + /* Discard any unused bytes beyond that submitted to hw. 
*/ + intel_ring_reset(ring, ring->tail); + if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else @@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; enum intel_engine_id id; + /* Restart from the beginning of the rings for convenience */ for_each_engine(engine, dev_priv, id) - engine->buffer->head = engine->buffer->tail; + intel_ring_reset(engine->buffer, 0); } static int ring_request_alloc(struct drm_i915_gem_request *request) @@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) unsigned space; /* Would completion of this request free enough space? */ - space = __intel_ring_space(target->postfix, ring->tail, + space = __intel_ring_space(target->postfix, ring->emit, ring->size); if (space >= bytes) break; @@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) { struct intel_ring *ring = req->ring; - int remain_actual = ring->size - ring->tail; - int remain_usable = ring->effective_size - ring->tail; + int remain_actual = ring->size - ring->emit; + int remain_usable = ring->effective_size - ring->emit; int bytes = num_dwords * sizeof(u32); int total_bytes, wait_bytes; bool need_wrap = false; @@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) if (unlikely(need_wrap)) { GEM_BUG_ON(remain_actual > ring->space); - GEM_BUG_ON(ring->tail + remain_actual > ring->size); + GEM_BUG_ON(ring->emit + remain_actual > ring->size); /* Fill the tail with MI_NOOP */ - memset(ring->vaddr + ring->tail, 0, remain_actual); - ring->tail = 0; + memset(ring->vaddr + ring->emit, 0, remain_actual); + ring->emit = 0; ring->space -= remain_actual; } - GEM_BUG_ON(ring->tail > ring->size - bytes); - cs = ring->vaddr + ring->tail; - ring->tail += bytes; + GEM_BUG_ON(ring->emit > ring->size - bytes); + cs = ring->vaddr + ring->emit; + ring->emit += bytes; ring->space -= bytes; GEM_BUG_ON(ring->space < 0); @@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int intel_ring_cacheline_align(struct drm_i915_gem_request *req) { int num_dwords = - (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); + (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); u32 *cs; if (num_dwords == 0) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a82a0807f64d..f7144fe09613 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -145,6 +145,7 @@ struct intel_ring { u32 head; u32 tail; + u32 emit; int space; int size; @@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, int size); int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); +void intel_ring_reset(struct intel_ring *ring, u32 tail); +void intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); void intel_ring_free(struct intel_ring *ring); @@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) * reserved for the command packet (i.e. the value passed to * intel_ring_begin()). 
*/ - GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); + GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs); } static inline u32 @@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) GEM_BUG_ON(tail >= ring->size); } -void intel_ring_update_space(struct intel_ring *ring); +static inline unsigned int +intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) +{ + /* Whilst writes to the tail are strictly order, there is no + * serialisation between readers and the writers. The tail may be + * read by i915_gem_request_retire() just as it is being updated + * by execlists, as although the breadcrumb is complete, the context + * switch hasn't been seen. + */ + assert_ring_tail_valid(ring, tail); + ring->tail = tail; + return tail; +} void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); From b8d5a9ccfba5fc084b50b00b9f5b587a8e64b72c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 9 Jun 2017 12:03:46 +0100 Subject: [PATCH 321/436] drm/i915: Encourage our shrinker more when our shmemfs allocations fails Commit 24f8e00a8a2e ("drm/i915: Prefer to report ENOMEM rather than incur the oom for gfx allocations") made the bold decision to try and avoid the oomkiller by reporting -ENOMEM to userspace if our allocation failed after attempting to free enough buffer objects. In short, it appears we were giving up too easily (even before we start wondering if one pass of reclaim is as strong as we would like). Part of the problem is that if we only shrink just enough pages for our expected allocation, the likelihood of those pages becoming available to us is less than 100% To counter-act that we ask for twice the number of pages to be made available. Furthermore, we allow the shrinker to pull pages from the active list in later passes. v2: Be a little more cautious in paging out gfx buffers, and leave that to a more balanced approach from shrink_slab(). Important when combined with "drm/i915: Start writeback from the shrinker" as anything shrunk is immediately swapped out and so should be more conservative. Fixes: 24f8e00a8a2e ("drm/i915: Prefer to report ENOMEM rather than incur the oom for gfx allocations") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Joonas Lahtinen Cc: Daniel Vetter Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170609110350.1767-1-chris@chris-wilson.co.uk (cherry picked from commit 4846bf0ca8cb4304dde6140eff33a92b3fe8ef24) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 50 +++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 462031cbd77f..c93f27b981f5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned int max_segment; + gfp_t noreclaim; int ret; - gfp_t gfp; /* Assert that the object is not currently in any GPU domain. 
As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2315,22 +2315,31 @@ rebuild_st: * Fail silently without starting the shrinker */ mapping = obj->base.filp->f_mapping; - gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); - gfp |= __GFP_NORETRY | __GFP_NOWARN; + noreclaim = mapping_gfp_constraint(mapping, + ~(__GFP_IO | __GFP_RECLAIM)); + noreclaim |= __GFP_NORETRY | __GFP_NOWARN; + sg = st->sgl; st->nents = 0; for (i = 0; i < page_count; i++) { - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (unlikely(IS_ERR(page))) { - i915_gem_shrink(dev_priv, - page_count, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE); + const unsigned int shrink[] = { + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, + 0, + }, *s = shrink; + gfp_t gfp = noreclaim; + + do { page = shmem_read_mapping_page_gfp(mapping, i, gfp); - } - if (unlikely(IS_ERR(page))) { - gfp_t reclaim; + if (likely(!IS_ERR(page))) + break; + + if (!*s) { + ret = PTR_ERR(page); + goto err_sg; + } + + i915_gem_shrink(dev_priv, 2 * page_count, *s++); + cond_resched(); /* We've tried hard to allocate the memory by reaping * our own buffer, now let the real VM do its job and @@ -2340,15 +2349,13 @@ rebuild_st: * defer the oom here by reporting the ENOMEM back * to userspace. */ - reclaim = mapping_gfp_mask(mapping); - reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ - - page = shmem_read_mapping_page_gfp(mapping, i, reclaim); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err_sg; + if (!*s) { + /* reclaim and warn, but no oom */ + gfp = mapping_gfp_mask(mapping); + gfp |= __GFP_NORETRY; } - } + } while (1); + if (!i || sg->length >= max_segment || page_to_pfn(page) != last_pfn + 1) { @@ -4222,6 +4229,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) mapping = obj->base.filp->f_mapping; mapping_set_gfp_mask(mapping, mask); + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); i915_gem_object_init(obj, &i915_gem_object_ops); From ce2c58724f7d07e76dadfeba53d6877a9e67341d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 9 Jun 2017 12:03:47 +0100 Subject: [PATCH 322/436] drm/i915: Remove __GFP_NORETRY from our buffer allocator I tried __GFP_NORETRY in the belief that __GFP_RECLAIM was effective. It struggles with handling reclaim of our dirty buffers and relies on reclaim via kswapd. As a result, a single pass of direct reclaim is unreliable when i915 occupies the majority of available memory, and the only means of effectively waiting on kswapd to make progress is by not setting the __GFP_NORETRY flag and looping. That leaves us with the dilemma of invoking the oomkiller instead of propagating the allocation failure back to userspace where it can be handled more gracefully (one hopes). In the future we may have __GFP_MAYFAIL to allow repeats up until we genuinely run out of memory and the oomkiller would have been invoked. Until then, let the oomkiller wreak havoc. v2: Stop playing with side-effects of gfp flags and await __GFP_MAYFAIL v3: Update comments that direct reclaim only appears to be ignoring our dirty buffers!
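The strategy described above and in the previous patch boils down to: try a cheap, non-blocking allocation first, reclaim and retry a bounded number of times, and only on the final pass pay for full reclaim before reporting ENOMEM instead of invoking the OOM killer. Below is a minimal, self-contained userspace sketch of that escalating retry pattern; try_alloc(), reclaim_pass() and the level table are illustrative stand-ins, not the i915 code.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins: a cheap attempt that may fail, and a reclaim pass
 * of a given strength. */
static void *try_alloc(size_t size, int allow_blocking)
{
    if (!allow_blocking)
        return NULL;            /* pretend the cheap, non-blocking attempt failed */
    return malloc(size);
}

static void reclaim_pass(unsigned int strength)
{
    printf("reclaim pass, strength %u\n", strength);
}

/*
 * Try the cheap path first, reclaim and retry a bounded number of times,
 * and only on the final pass allow the expensive blocking allocation.
 * Return NULL (the caller reports ENOMEM) rather than looping forever.
 */
static void *alloc_with_escalation(size_t size)
{
    static const unsigned int levels[] = { 1, 2, 0 };   /* 0 terminates */
    const unsigned int *level = levels;
    int allow_blocking = 0;

    for (;;) {
        void *p = try_alloc(size, allow_blocking);
        if (p)
            return p;
        if (!*level)
            return NULL;            /* out of fallbacks: caller sees ENOMEM */
        reclaim_pass(*level++);
        if (!*level)
            allow_blocking = 1;     /* last try: allow the blocking path */
    }
}

int main(void)
{
    void *p = alloc_with_escalation(4096);
    printf("%s\n", p ? "allocation succeeded" : "ENOMEM");
    free(p);
    return 0;
}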
Fixes: 24f8e00a8a2e ("drm/i915: Prefer to report ENOMEM rather than incur the oom for gfx allocations") Testcase: igt/gem_tiled_swapping Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Joonas Lahtinen Cc: Daniel Vetter Cc: Michal Hocko Link: http://patchwork.freedesktop.org/patch/msgid/20170609110350.1767-2-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen (cherry picked from commit eaf41801559a687cc7511c04dc712984765c9dd7) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c93f27b981f5..615f0a855222 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2352,7 +2352,20 @@ rebuild_st: if (!*s) { /* reclaim and warn, but no oom */ gfp = mapping_gfp_mask(mapping); - gfp |= __GFP_NORETRY; + + /* Our bo are always dirty and so we require + * kswapd to reclaim our pages (direct reclaim + * does not effectively begin pageout of our + * buffers on its own). However, direct reclaim + * only waits for kswapd when under allocation + * congestion. So as a result __GFP_RECLAIM is + * unreliable and fails to actually reclaim our + * dirty pages -- unless you try over and over + * again with !__GFP_NORETRY. However, we still + * want to fail this allocation rather than + * trigger the out-of-memory killer and for + * this we want the future __GFP_MAYFAIL. + */ } } while (1); From 17b206c27366f3cee816eaf86fafc6a11f628ecf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Jun 2017 17:36:13 +0300 Subject: [PATCH 323/436] drm/i915: Fix deadlock with the pipe A quirk during resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass down the correct acquire context to the pipe A quirk load detect hack during display resume. Avoids deadlocking the entire thing.
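The bug class here is more general than this one driver: a helper that silently reaches for a global locking context can end up trying to take a lock its caller already holds. Below is a minimal, self-contained C sketch of the broken shape and of the fix pattern used by this patch and the next one (plumb the caller's context down instead of consulting a global); the names and the pthread-based locking are illustrative stand-ins, not the i915/DRM code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t modeset_lock = PTHREAD_MUTEX_INITIALIZER;

/* Broken shape: the helper takes modeset_lock itself. If the caller already
 * holds modeset_lock (as the resume path below does), this second lock
 * attempt on a normal, non-recursive mutex never returns. */
static void sanitize_buggy(void)
{
    pthread_mutex_lock(&modeset_lock);
    /* ... fix up state ... */
    pthread_mutex_unlock(&modeset_lock);
}

/* Fixed shape: the caller tells the helper which locking context it already
 * owns, so the helper does not try to acquire the lock a second time. */
struct lock_ctx {
    pthread_mutex_t *held;      /* lock the caller already holds */
};

static void sanitize_fixed(const struct lock_ctx *ctx)
{
    /* ctx->held is already locked by the caller; just do the work */
    printf("sanitizing under caller's context (lock %p held)\n",
           (void *)ctx->held);
}

static void resume_path(void)
{
    struct lock_ctx ctx = { .held = &modeset_lock };

    pthread_mutex_lock(&modeset_lock);
    sanitize_fixed(&ctx);           /* fine: no hidden second lock */
    /* sanitize_buggy(); */         /* would deadlock right here */
    pthread_mutex_unlock(&modeset_lock);
}

int main(void)
{
    resume_path();
    (void)sanitize_buggy;           /* kept only to show the broken shape */
    return 0;
}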
Cc: stable@vger.kernel.org Cc: Maarten Lankhorst Fixes: e2c8b8701e2d ("drm/i915: Use atomic helpers for suspend, v2.") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170601143619.27840-2-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst (cherry picked from commit aecd36b8a16b2302b33f49ba3fa24c955f1e32f7) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 96b0b01677e2..c27bc95d763c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, static void skylake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); -static void intel_modeset_setup_hw_state(struct drm_device *dev); +static void intel_modeset_setup_hw_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); struct intel_limit { @@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev, struct drm_crtc *crtc; int i, ret; - intel_modeset_setup_hw_state(dev); + intel_modeset_setup_hw_state(dev, ctx); i915_redisable_vga(to_i915(dev)); if (!state) @@ -15030,7 +15031,7 @@ int intel_modeset_init(struct drm_device *dev) intel_setup_outputs(dev_priv); drm_modeset_lock_all(dev); - intel_modeset_setup_hw_state(dev); + intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); drm_modeset_unlock_all(dev); for_each_intel_crtc(dev, crtc) { @@ -15067,13 +15068,13 @@ int intel_modeset_init(struct drm_device *dev) return 0; } -static void intel_enable_pipe_a(struct drm_device *dev) +static void intel_enable_pipe_a(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_connector *crt = NULL; struct intel_load_detect_pipe load_detect_temp; - struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; int ret; /* We can't just switch on the pipe A, we need to set things up with a @@ -15145,7 +15146,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv, (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); } -static void intel_sanitize_crtc(struct intel_crtc *crtc) +static void intel_sanitize_crtc(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -15201,7 +15203,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * resume. Force-enable the pipe to fix this, the update_dpms * call below we restore the pipe to the right state, but leave * the required bits on. 
*/ - intel_enable_pipe_a(dev); + intel_enable_pipe_a(dev, ctx); } /* Adjust the state of the output pipe according to whether we @@ -15505,7 +15507,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) * and sanitizes it to the current state */ static void -intel_modeset_setup_hw_state(struct drm_device *dev) +intel_modeset_setup_hw_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; @@ -15525,7 +15528,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev) for_each_pipe(dev_priv, pipe) { crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - intel_sanitize_crtc(crtc); + intel_sanitize_crtc(crtc, ctx); intel_dump_pipe_config(crtc, crtc->config, "[setup_hw_state]"); } From b7f5dd36e0c5cb9ca1070a5e0f22f666bcff07ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 1 Jun 2017 17:36:14 +0300 Subject: [PATCH 324/436] drm/i915: Plumb the correct acquire ctx into intel_crtc_disable_noatomic() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If intel_crtc_disable_noatomic() were to ever get called during resume we'd end up deadlocking since resume has its own acquire_ctx but intel_crtc_disable_noatomic() still tries to use the mode_config.acquire_ctx. Pass down the correct acquire ctx from the top. Cc: stable@vger.kernel.org Cc: Maarten Lankhorst Fixes: e2c8b8701e2d ("drm/i915: Use atomic helpers for suspend, v2.") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170601143619.27840-3-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst (cherry picked from commit da1d0e265535634bba80d44510b864c620549bee) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c27bc95d763c..9106ea32b048 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5826,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_update_watermarks(intel_crtc); } -static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) +static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5856,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) return; } - state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; + state->acquire_ctx = ctx; /* Everything's already locked, -EDEADLK can't happen. */ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); @@ -15193,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, plane = crtc->plane; crtc->base.primary->state->visible = true; crtc->plane = !plane; - intel_crtc_disable_noatomic(&crtc->base); + intel_crtc_disable_noatomic(&crtc->base, ctx); crtc->plane = plane; } @@ -15209,7 +15210,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Adjust the state of the output pipe according to whether we * have active connectors/encoders.
*/ if (crtc->active && !intel_crtc_has_encoders(crtc)) - intel_crtc_disable_noatomic(&crtc->base); + intel_crtc_disable_noatomic(&crtc->base, ctx); if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { /* From 1be7107fbe18eed3e319a6c3e83c78254b693acb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 19 Jun 2017 04:03:24 -0700 Subject: [PATCH 325/436] mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that.
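To make the sizing concrete: with 4k pages the default gap is 256 << 12 bytes = 1MB, and booting with, say, stack_guard_gap=512 would double it to 2MB. The snippet below is a small standalone illustration of the gap bookkeeping, mirroring (not reproducing) the shape of the vm_start_gap() helper added in the diff further down; it uses plain integers rather than vm_area_structs, and the constants are only examples.

#include <stdio.h>

#define PAGE_SHIFT 12UL                 /* assume 4k pages */

/* default: 256 pages = 1MB; the stack_guard_gap= boot option overrides this */
static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

/*
 * Lowest address a grows-down stack effectively occupies once the guard gap
 * is counted, clamped so the subtraction cannot wrap below zero (the same
 * shape as the vm_start_gap() helper introduced by the patch).
 */
static unsigned long start_with_gap(unsigned long vm_start, int grows_down)
{
    unsigned long start = vm_start;

    if (grows_down) {
        start -= stack_guard_gap;
        if (start > vm_start)       /* wrapped past 0 */
            start = 0;
    }
    return start;
}

int main(void)
{
    unsigned long vm_start = 0x7f000000UL;

    printf("gap      : %lu bytes (%lu pages)\n",
           stack_guard_gap, stack_guard_gap >> PAGE_SHIFT);
    printf("vm_start : %#lx\n", vm_start);
    printf("with gap : %#lx\n", start_with_gap(vm_start, 1));
    /* a neighbouring mapping must end at or below the "with gap" address for
     * this stack to keep its full guard gap */
    return 0;
}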
Original-patch-by: Oleg Nesterov Original-patch-by: Michal Hocko Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Tested-by: Helge Deller # parisc Signed-off-by: Linus Torvalds --- .../admin-guide/kernel-parameters.txt | 7 + arch/arc/mm/mmap.c | 2 +- arch/arm/mm/mmap.c | 4 +- arch/frv/mm/elf-fdpic.c | 2 +- arch/mips/mm/mmap.c | 2 +- arch/parisc/kernel/sys_parisc.c | 15 +- arch/powerpc/mm/hugetlbpage-radix.c | 2 +- arch/powerpc/mm/mmap.c | 4 +- arch/powerpc/mm/slice.c | 2 +- arch/s390/mm/mmap.c | 4 +- arch/sh/mm/mmap.c | 4 +- arch/sparc/kernel/sys_sparc_64.c | 4 +- arch/sparc/mm/hugetlbpage.c | 2 +- arch/tile/mm/hugetlbpage.c | 2 +- arch/x86/kernel/sys_x86_64.c | 4 +- arch/x86/mm/hugetlbpage.c | 2 +- arch/xtensa/kernel/syscall.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/proc/task_mmu.c | 4 - include/linux/mm.h | 53 +++---- mm/gup.c | 5 - mm/memory.c | 38 ----- mm/mmap.c | 149 +++++++++++------- 23 files changed, 152 insertions(+), 163 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 0f5c3b4347c6..7737ab5d04b2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3811,6 +3811,13 @@ expediting. Set to zero to disable automatic expediting. + stack_guard_gap= [MM] + override the default stack gap protection. The value + is in page units and it defines how many pages prior + to (for stacks growing down) resp. after (for stacks + growing up) the main stack are reserved for no other + mapping. Default value is 256 pages. + stacktrace [FTRACE] Enabled the stack tracer on boot up. diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 3e25e8d6486b..2e13683dfb24 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 2239fde10b80..f0701d8d24df 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index da82c25301e7..46aa289c5102 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto success; } diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 64dd8bdd92c3..28adeabe851f 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git 
a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index e5288638a1d9..378a754ca186 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; unsigned long task_size = TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; @@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } @@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_color_align, last_mmap; @@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 6575b9aabef4..a12e86395025 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } /* diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 9dbd2a733d6b..0ee6be4f1ba4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 966b9fccfa66..45f6740dd407 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index b017daed6887..b854b1da281a 100644 --- a/arch/s390/mm/mmap.c 
+++ b/arch/s390/mm/mmap.c @@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto check_asce_limit; } @@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto check_asce_limit; } diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 08e7af0be4a7..6a1a1297baae 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index ef4520efc813..043544d0cda3 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 7c29d38e6b99..88855e383b34 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index cb10153b5c9f..03e5cc4e76e4 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (current->mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 207b8f2582c7..213ddf3e937d 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file 
*filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 302f43fd9c28..adad702b39cd 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 06937928cb72..74afbf02d07e 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm || addr + len <= vm_start_gap(vmm)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index dde861387a40..d44f5456eb9b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0c8b33d99b1..520802da059c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", diff --git a/include/linux/mm.h b/include/linux/mm.h index b892e95d4929..6f543a47fc92 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - static inline bool vma_is_anonymous(struct vm_area_struct *vma) { return !vma->vm_ops; @@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma); static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } #endif -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it? 
*/ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - int vma_is_stack_for_current(struct vm_area_struct *vma); extern unsigned long move_page_tables(struct vm_area_struct *vma, @@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); +extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m return vma; } +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/mm/gup.c b/mm/gup.c index b3c7214d710d..576c4df58882 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; - /* For mm_populate(), just skip the stack guard page. */ - if ((*flags & FOLL_POPULATE) && - (stack_guard_page_start(vma, address) || - stack_guard_page_end(vma, address + PAGE_SIZE))) - return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) diff --git a/mm/memory.c b/mm/memory.c index 2e65df1831d9..bb11c474857e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2854,40 +2854,6 @@ out_release: return ret; } -/* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - return expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - - return expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. 
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, vmf->address) < 0) - return VM_FAULT_SIGSEGV; - /* * Use pte_alloc() instead of pte_alloc_map(). We can't run * pte_offset_map() on pmds where a huge pmd might be created diff --git a/mm/mmap.c b/mm/mmap.c index f82741e199c0..8e07976d5e47 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; bool populate; LIST_HEAD(uf); @@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ @@ -253,10 +255,22 @@ out: static long vma_compute_subtree_gap(struct vm_area_struct *vma) { - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; + unsigned long max, prev_end, subtree_gap; + + /* + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we + * allow two stack_guard_gaps between them here, and when choosing + * an unmapped area; whereas when expanding we only require one. + * That's a little inconsistent, but keeps the code here simpler. + */ + max = vm_start_gap(vma); + if (vma->vm_prev) { + prev_end = vm_end_gap(vma->vm_prev); + if (max > prev_end) + max -= prev_end; + else + max = 0; + } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm) anon_vma_unlock_read(anon_vma); } - highest_address = vma->vm_end; + highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } @@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = vma->vm_end; + mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the @@ -856,7 +870,7 @@ again: vma_gap_update(vma); if (end_changed) { if (!next) - mm->highest_vm_end = end; + mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } @@ -941,7 +955,7 @@ again: * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ - VM_WARN_ON(mm->highest_vm_end != end); + VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) @@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) while (true) { /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, @@ -1798,7 +1812,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) } } - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) @@ -1825,8 +1839,8 @@ check_current: vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; + gap_start = vm_end_gap(vma->vm_prev); + gap_end = vm_start_gap(vma); goto check_current; } } @@ -1890,7 +1904,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) while (true) { /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, @@ -1903,7 +1917,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) check_current: /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end - gap_start >= length) @@ -1929,7 +1943,7 @@ check_current: struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; + vm_end_gap(vma->vm_prev) : 0; goto check_current; } } @@ -1967,7 +1981,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; if (len > TASK_SIZE - mmap_min_addr) @@ -1978,9 +1992,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2003,7 +2018,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -2018,9 +2033,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2155,21 +2171,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, * update accounting. This is shared with both the * grow-up and grow-down cases. 
*/ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start, actual_size; + unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ - actual_size = size; - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) - actual_size -= PAGE_SIZE; - if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ @@ -2207,17 +2221,30 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *next; + unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else + address &= PAGE_MASK; + address += PAGE_SIZE; + if (!address) return -ENOMEM; + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + if (gap_addr < address) + return -ENOMEM; + next = vma->vm_next; + if (next && next->vm_start < gap_addr) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2261,7 +2288,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = address; + mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); @@ -2282,6 +2309,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *prev; + unsigned long gap_addr; int error; address &= PAGE_MASK; @@ -2289,6 +2318,17 @@ int expand_downwards(struct vm_area_struct *vma, if (error) return error; + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2343,28 +2383,25 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ +/* enforced gap between the expanding stack and other mappings. 
*/ +unsigned long stack_guard_gap = 256UL< Date: Sun, 11 Jun 2017 16:08:21 +0900 Subject: [PATCH 328/436] ALSA: firewire-lib: Fix stall of process context at packet error At Linux v3.5, packet processing can be done in process context of ALSA PCM application as well as software IRQ context for OHCI 1394. Below is an example of the callgraph (some calls are omitted). ioctl(2) with e.g. HWSYNC (sound/core/pcm_native.c) ->snd_pcm_common_ioctl1() ->snd_pcm_hwsync() ->snd_pcm_stream_lock_irq (sound/core/pcm_lib.c) ->snd_pcm_update_hw_ptr() ->snd_pcm_udpate_hw_ptr0() ->struct snd_pcm_ops.pointer() (sound/firewire/*) = Each handler on drivers in ALSA firewire stack (sound/firewire/amdtp-stream.c) ->amdtp_stream_pcm_pointer() (drivers/firewire/core-iso.c) ->fw_iso_context_flush_completions() ->struct fw_card_driver.flush_iso_completion() (drivers/firewire/ohci.c) = flush_iso_completions() ->struct fw_iso_context.callback.sc (sound/firewire/amdtp-stream.c) = in_stream_callback() or out_stream_callback() ->... ->snd_pcm_stream_unlock_irq When packet queueing error occurs or detecting invalid packets in 'in_stream_callback()' or 'out_stream_callback()', 'snd_pcm_stop_xrun()' is called on local CPU with disabled IRQ. (sound/firewire/amdtp-stream.c) in_stream_callback() or out_stream_callback() ->amdtp_stream_pcm_abort() ->snd_pcm_stop_xrun() ->snd_pcm_stream_lock_irqsave() ->snd_pcm_stop() ->snd_pcm_stream_unlock_irqrestore() The process is stalled on the CPU due to attempt to acquire recursive lock. [ 562.630853] INFO: rcu_sched detected stalls on CPUs/tasks: [ 562.630861] 2-...: (1 GPs behind) idle=37d/140000000000000/0 softirq=38323/38323 fqs=7140 [ 562.630862] (detected by 3, t=15002 jiffies, g=21036, c=21035, q=5933) [ 562.630866] Task dump for CPU 2: [ 562.630867] alsa-source-OXF R running task 0 6619 1 0x00000008 [ 562.630870] Call Trace: [ 562.630876] ? vt_console_print+0x79/0x3e0 [ 562.630880] ? msg_print_text+0x9d/0x100 [ 562.630883] ? up+0x32/0x50 [ 562.630885] ? irq_work_queue+0x8d/0xa0 [ 562.630886] ? console_unlock+0x2b6/0x4b0 [ 562.630888] ? vprintk_emit+0x312/0x4a0 [ 562.630892] ? dev_vprintk_emit+0xbf/0x230 [ 562.630895] ? do_sys_poll+0x37a/0x550 [ 562.630897] ? dev_printk_emit+0x4e/0x70 [ 562.630900] ? __dev_printk+0x3c/0x80 [ 562.630903] ? _raw_spin_lock+0x20/0x30 [ 562.630909] ? snd_pcm_stream_lock+0x31/0x50 [snd_pcm] [ 562.630914] ? _snd_pcm_stream_lock_irqsave+0x2e/0x40 [snd_pcm] [ 562.630918] ? snd_pcm_stop_xrun+0x16/0x70 [snd_pcm] [ 562.630922] ? in_stream_callback+0x3e6/0x450 [snd_firewire_lib] [ 562.630925] ? handle_ir_packet_per_buffer+0x8e/0x1a0 [firewire_ohci] [ 562.630928] ? ohci_flush_iso_completions+0xa3/0x130 [firewire_ohci] [ 562.630932] ? fw_iso_context_flush_completions+0x15/0x20 [firewire_core] [ 562.630935] ? amdtp_stream_pcm_pointer+0x2d/0x40 [snd_firewire_lib] [ 562.630938] ? pcm_capture_pointer+0x19/0x20 [snd_oxfw] [ 562.630943] ? snd_pcm_update_hw_ptr0+0x47/0x3d0 [snd_pcm] [ 562.630945] ? poll_select_copy_remaining+0x150/0x150 [ 562.630947] ? poll_select_copy_remaining+0x150/0x150 [ 562.630952] ? snd_pcm_update_hw_ptr+0x10/0x20 [snd_pcm] [ 562.630956] ? snd_pcm_hwsync+0x45/0xb0 [snd_pcm] [ 562.630960] ? snd_pcm_common_ioctl1+0x1ff/0xc90 [snd_pcm] [ 562.630962] ? futex_wake+0x90/0x170 [ 562.630966] ? snd_pcm_capture_ioctl1+0x136/0x260 [snd_pcm] [ 562.630970] ? snd_pcm_capture_ioctl+0x27/0x40 [snd_pcm] [ 562.630972] ? do_vfs_ioctl+0xa3/0x610 [ 562.630974] ? vfs_read+0x11b/0x130 [ 562.630976] ? SyS_ioctl+0x79/0x90 [ 562.630978] ? 
entry_SYSCALL_64_fastpath+0x1e/0xad This commit fixes the above bug. This assumes two cases: 1. Any error is detected in software IRQ context of OHCI 1394 context. In this case, PCM substream should be aborted in packet handler. On the other hand, it should not be done in any process context. TO distinguish these two context, use 'in_interrupt()' macro. 2. Any error is detect in process context of ALSA PCM application. In this case, PCM substream should not be aborted in packet handler because PCM substream lock is acquired. The task to abort PCM substream should be done in ALSA PCM core. For this purpose, SNDRV_PCM_POS_XRUN is returned at 'struct snd_pcm_ops.pointer()'. Suggested-by: Clemens Ladisch Fixes: e9148dddc3c7("ALSA: firewire-lib: flush completed packets when reading PCM position") Cc: # 4.9+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/amdtp-stream.c | 8 ++++++-- sound/firewire/amdtp-stream.h | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index 9e6f54f8c45d..1e26854b3425 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp, cycle = increment_cycle_count(cycle, 1); if (s->handle_packet(s, 0, cycle, i) < 0) { s->packet_index = -1; - amdtp_stream_pcm_abort(s); + if (in_interrupt()) + amdtp_stream_pcm_abort(s); + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); return; } } @@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp, /* Queueing error or detecting invalid payload. */ if (i < packets) { s->packet_index = -1; - amdtp_stream_pcm_abort(s); + if (in_interrupt()) + amdtp_stream_pcm_abort(s); + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); return; } diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h index 7e8831722821..ea1a91e99875 100644 --- a/sound/firewire/amdtp-stream.h +++ b/sound/firewire/amdtp-stream.h @@ -135,7 +135,7 @@ struct amdtp_stream { /* For a PCM substream processing. */ struct snd_pcm_substream *pcm; struct tasklet_struct period_tasklet; - unsigned int pcm_buffer_pointer; + snd_pcm_uframes_t pcm_buffer_pointer; unsigned int pcm_period_pointer; /* To wait for first packet. */ From 9745e362add89432d2c951272a99b0a5fe4348a9 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Fri, 16 Jun 2017 15:00:02 +0800 Subject: [PATCH 329/436] net: 8021q: Fix one possible panic caused by BUG_ON in free_netdev The register_vlan_device would invoke free_netdev directly, when register_vlan_dev failed. It would trigger the BUG_ON in free_netdev if the dev was already registered. In this case, the netdev would be freed in netdev_run_todo later. So add one condition check now. Only when dev is not registered, then free it directly. The following is the part coredump when netdev_upper_dev_link failed in register_vlan_dev. I removed the lines which are too long. [ 411.237457] ------------[ cut here ]------------ [ 411.237458] kernel BUG at net/core/dev.c:7998! [ 411.237484] invalid opcode: 0000 [#1] SMP [ 411.237705] [last unloaded: 8021q] [ 411.237718] CPU: 1 PID: 12845 Comm: vconfig Tainted: G E 4.12.0-rc5+ #6 [ 411.237737] Hardware name: VMware, Inc. 
VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 411.237764] task: ffff9cbeb6685580 task.stack: ffffa7d2807d8000 [ 411.237782] RIP: 0010:free_netdev+0x116/0x120 [ 411.237794] RSP: 0018:ffffa7d2807dbdb0 EFLAGS: 00010297 [ 411.237808] RAX: 0000000000000002 RBX: ffff9cbeb6ba8fd8 RCX: 0000000000001878 [ 411.237826] RDX: 0000000000000001 RSI: 0000000000000282 RDI: 0000000000000000 [ 411.237844] RBP: ffffa7d2807dbdc8 R08: 0002986100029841 R09: 0002982100029801 [ 411.237861] R10: 0004000100029980 R11: 0004000100029980 R12: ffff9cbeb6ba9000 [ 411.238761] R13: ffff9cbeb6ba9060 R14: ffff9cbe60f1a000 R15: ffff9cbeb6ba9000 [ 411.239518] FS: 00007fb690d81700(0000) GS:ffff9cbebb640000(0000) knlGS:0000000000000000 [ 411.239949] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 411.240454] CR2: 00007f7115624000 CR3: 0000000077cdf000 CR4: 00000000003406e0 [ 411.240936] Call Trace: [ 411.241462] vlan_ioctl_handler+0x3f1/0x400 [8021q] [ 411.241910] sock_ioctl+0x18b/0x2c0 [ 411.242394] do_vfs_ioctl+0xa1/0x5d0 [ 411.242853] ? sock_alloc_file+0xa6/0x130 [ 411.243465] SyS_ioctl+0x79/0x90 [ 411.243900] entry_SYSCALL_64_fastpath+0x1e/0xa9 [ 411.244425] RIP: 0033:0x7fb69089a357 [ 411.244863] RSP: 002b:00007ffcd04e0fc8 EFLAGS: 00000202 ORIG_RAX: 0000000000000010 [ 411.245445] RAX: ffffffffffffffda RBX: 00007ffcd04e2884 RCX: 00007fb69089a357 [ 411.245903] RDX: 00007ffcd04e0fd0 RSI: 0000000000008983 RDI: 0000000000000003 [ 411.246527] RBP: 00007ffcd04e0fd0 R08: 0000000000000000 R09: 1999999999999999 [ 411.246976] R10: 000000000000053f R11: 0000000000000202 R12: 0000000000000004 [ 411.247414] R13: 00007ffcd04e1128 R14: 00007ffcd04e2888 R15: 0000000000000001 [ 411.249129] RIP: free_netdev+0x116/0x120 RSP: ffffa7d2807dbdb0 Signed-off-by: Gao Feng Signed-off-by: David S. Miller --- net/8021q/vlan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 467069b73ce1..9649579b5b9f 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) return 0; out_free_newdev: - free_netdev(new_dev); + if (new_dev->reg_state == NETREG_UNINITIALIZED) + free_netdev(new_dev); return err; } From 7fe5b914313ff67d71cb2b5aa4b850e0884e75dd Mon Sep 17 00:00:00 2001 From: Lin Yun Sheng Date: Fri, 16 Jun 2017 17:24:51 +0800 Subject: [PATCH 330/436] net/hns:bugfix of ethtool -t phy self_test This patch fixes the phy loopback self_test failed issue. when Marvell Phy Module is loaded, it will powerdown fiber when doing phy loopback self test, which cause phy loopback self_test fail. Signed-off-by: Lin Yun Sheng Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index b8fab149690f..e95795b3c841 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) /* Force 1000M Link, Default is 0x0200 */ phy_write(phy_dev, 7, 0x20C); - phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); - /* Enable PHY loop-back */ + /* Powerup Fiber */ + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); + val = phy_read(phy_dev, COPPER_CONTROL_REG); + val &= ~PHY_POWER_DOWN; + phy_write(phy_dev, COPPER_CONTROL_REG, val); + + /* Enable Phy Loopback */ + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; val &= ~PHY_POWER_DOWN; @@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); phy_write(phy_dev, 1, 0x400); phy_write(phy_dev, 7, 0x200); + + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); + val = phy_read(phy_dev, COPPER_CONTROL_REG); + val |= PHY_POWER_DOWN; + phy_write(phy_dev, COPPER_CONTROL_REG, val); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); phy_write(phy_dev, 9, 0xF00); From 94fc795454f461134cdffb88bef4eb9f788c0b5d Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Thu, 4 May 2017 11:36:52 -0500 Subject: [PATCH 331/436] ntb: Correct modinfo usage statement for ntb_perf The order parameters are powers of 2; adjust the usage information to use correct mathematical representations. Signed-off-by: Gary R Hook Fixes: 8a7b6a778a85 ("ntb: ntb perf tool") Acked-by: Dave Jiang Signed-off-by: Jon Mason --- drivers/ntb/test/ntb_perf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 434e1d474f33..5cab2831ce99 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows"); static unsigned int seg_order = 19; /* 512K */ module_param(seg_order, uint, 0644); -MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); +MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing"); static unsigned int run_order = 32; /* 4G */ module_param(run_order, uint, 0644); -MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); +MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer"); static bool use_dma; /* default to 0 */ module_param(use_dma, bool, 0644); From 07b0b22b3e58824f70b9188d085d400069ca3240 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 5 Jun 2017 10:13:24 -0600 Subject: [PATCH 332/436] NTB: ntb_test: fix bug printing ntb_perf results The code mistakenly prints the local perf results for the remote test so the script reports identical results for both directions. Fix this by ensuring we print the remote result. 
Signed-off-by: Logan Gunthorpe Fixes: a9c59ef77458 ("ntb_test: Add a selftest script for the NTB subsystem") Acked-by: Allen Hubbe Signed-off-by: Jon Mason --- tools/testing/selftests/ntb/ntb_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh index a676d3eefefb..13f5198ba0ee 100755 --- a/tools/testing/selftests/ntb/ntb_test.sh +++ b/tools/testing/selftests/ntb/ntb_test.sh @@ -305,7 +305,7 @@ function perf_test() echo "Running remote perf test $WITH DMA" write_file "" $REMOTE_PERF/run echo -n " " - read_file $LOCAL_PERF/run + read_file $REMOTE_PERF/run echo " Passed" _modprobe -r ntb_perf From cb827ee6ccc3e480f0d9c0e8e53eef55be5b0414 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 5 Jun 2017 14:00:52 -0600 Subject: [PATCH 333/436] ntb_transport: fix qp count bug In cases where there are more mw's than spads/2-2, the mw count gets reduced to match the limitation. ntb_transport also tries to ensure that there are fewer qps than mws but uses the full mw count instead of the reduced one. When this happens, the math in 'ntb_transport_setup_qp_mw' will get confused and result in a kernel paging request bug. This patch fixes the bug by reducing qp_count to the reduced mw count instead of the full mw count. Signed-off-by: Logan Gunthorpe Fixes: e26a5843f7f5 ("NTB: Split ntb_hw_intel and ntb_transport drivers") Acked-by: Allen Hubbe Signed-off-by: Jon Mason --- drivers/ntb/ntb_transport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 02ca45fdd892..0a778d2cab94 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1128,8 +1128,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) qp_count = ilog2(qp_bitmap); if (max_num_clients && max_num_clients < qp_count) qp_count = max_num_clients; - else if (mw_count < qp_count) - qp_count = mw_count; + else if (nt->mw_count < qp_count) + qp_count = nt->mw_count; qp_bitmap &= BIT_ULL(qp_count) - 1; From 8e8496e0e9564b66165f5219a4e8ed20b0d3fc6b Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 5 Jun 2017 14:00:53 -0600 Subject: [PATCH 334/436] ntb_transport: fix bug calculating num_qps_mw A divide by zero error occurs if qp_count is less than mw_count because num_qps_mw is calculated to be zero. The calculation appears to be incorrect. The requirement is for num_qps_mw to be set to qp_count / mw_count with any remainder divided among the earlier mws. For example, if mw_count is 5 and qp_count is 12 then mws 0 and 1 will have 3 qps per window and mws 2 through 4 will have 2 qps per window. Thus, when mw_num < qp_count % mw_count, num_qps_mw is 1 higher than when mw_num >= qp_count. 
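As a rough illustration of that distribution (a stand-alone sketch, not part of the patch; the qps_for_mw() helper is invented for this example), the corrected test can be exercised with the changelog's own numbers:

#include <stdio.h>

/* Sketch only: mirrors the corrected "mw_num < qp_count % mw_count" test. */
static unsigned int qps_for_mw(unsigned int mw_num, unsigned int qp_count,
			       unsigned int mw_count)
{
	/* The earlier mws each absorb one qp of the remainder. */
	if (mw_num < qp_count % mw_count)
		return qp_count / mw_count + 1;
	return qp_count / mw_count;
}

int main(void)
{
	unsigned int mw_count = 5, qp_count = 12, mw_num, total = 0;

	for (mw_num = 0; mw_num < mw_count; mw_num++) {
		unsigned int n = qps_for_mw(mw_num, qp_count, mw_count);

		printf("mw %u -> %u qps\n", mw_num, n);
		total += n;
	}
	printf("total: %u qps\n", total);
	return 0;
}

Run with mw_count = 5 and qp_count = 12 it prints 3, 3, 2, 2, 2 for a total of 12, matching the example above: the remainder of two qps lands on the first two memory windows.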
Signed-off-by: Logan Gunthorpe Fixes: e26a5843f7f5 ("NTB: Split ntb_hw_intel and ntb_transport drivers") Acked-by: Allen Hubbe Signed-off-by: Jon Mason --- drivers/ntb/ntb_transport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 0a778d2cab94..5b6b00ea6ed9 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -623,7 +623,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, if (!mw->virt_addr) return -ENOMEM; - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) + if (mw_num < qp_count % mw_count) num_qps_mw = qp_count / mw_count + 1; else num_qps_mw = qp_count / mw_count; @@ -1000,7 +1000,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, qp->event_handler = NULL; ntb_qp_link_down_reset(qp); - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) + if (mw_num < qp_count % mw_count) num_qps_mw = qp_count / mw_count + 1; else num_qps_mw = qp_count / mw_count; From 5eb449e15d2396785a8eb15baf42cea33db9ae13 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 8 Jun 2017 12:46:45 -0700 Subject: [PATCH 335/436] ntb: ntb_hw_intel: Skylake doorbells should be 32bits, not 64bits Fixing doorbell register length to 32bits per spec. On Skylake NTB, the doorbell registers are 32bit write only registers. The source for the doorbell is a 64bit register that shows the interrupt bits. Signed-off-by: Dave Jiang Fixes: 783dfa6cc41b ("ntb: Adding Skylake Xeon NTB support") Acked-by: Allen Hubbe Signed-off-by: Jon Mason --- drivers/ntb/hw/intel/ntb_hw_intel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index c00238491673..7b3b6fd63d7d 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c @@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = { .link_is_up = xeon_link_is_up, .db_ioread = skx_db_ioread, .db_iowrite = skx_db_iowrite, - .db_size = sizeof(u64), + .db_size = sizeof(u32), .ntb_ctl = SKX_NTBCNTL_OFFSET, .mw_bar = {2, 4}, }; From 88931ec3dc11e7dbceb3b0df455693873b508fbe Mon Sep 17 00:00:00 2001 From: Allen Hubbe Date: Fri, 9 Jun 2017 18:06:36 -0400 Subject: [PATCH 336/436] ntb: no sleep in ntb_async_tx_submit Do not sleep in ntb_async_tx_submit, which could deadlock. 
This reverts commit "8c874cc140d667f84ae4642bb5b5e0d6396d2ca4" Fixes: 8c874cc140d6 ("NTB: Address out of DMA descriptor issue with NTB") Reported-by: Jia-Ju Bai Signed-off-by: Allen Hubbe Acked-by: Dave Jiang Signed-off-by: Jon Mason --- drivers/ntb/ntb_transport.c | 50 ++++++------------------------------- 1 file changed, 7 insertions(+), 43 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 5b6b00ea6ed9..10e5bf460139 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -177,14 +177,12 @@ struct ntb_transport_qp { u64 rx_err_ver; u64 rx_memcpy; u64 rx_async; - u64 dma_rx_prep_err; u64 tx_bytes; u64 tx_pkts; u64 tx_ring_full; u64 tx_err_no_buf; u64 tx_memcpy; u64 tx_async; - u64 dma_tx_prep_err; }; struct ntb_transport_mw { @@ -254,8 +252,6 @@ enum { #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 -#define DMA_RETRIES 20 -#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) static void ntb_transport_rxc_db(unsigned long data); static const struct ntb_ctx_ops ntb_transport_ops; @@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, out_offset += snprintf(buf + out_offset, out_count - out_offset, "free tx - \t%u\n", ntb_transport_tx_free_entry(qp)); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "DMA tx prep err - \t%llu\n", - qp->dma_tx_prep_err); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "DMA rx prep err - \t%llu\n", - qp->dma_rx_prep_err); out_offset += snprintf(buf + out_offset, out_count - out_offset, "\n"); @@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) qp->tx_err_no_buf = 0; qp->tx_memcpy = 0; qp->tx_async = 0; - qp->dma_tx_prep_err = 0; - qp->dma_rx_prep_err = 0; } static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) @@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - int retries = 0; len = entry->len; device = chan->device; @@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) unmap->from_cnt = 1; - for (retries = 0; retries < DMA_RETRIES; retries++) { - txd = device->device_prep_dma_memcpy(chan, - unmap->addr[1], - unmap->addr[0], len, - DMA_PREP_INTERRUPT); - if (txd) - break; - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(DMA_OUT_RESOURCE_TO); - } - - if (!txd) { - qp->dma_rx_prep_err++; + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + DMA_PREP_INTERRUPT); + if (!txd) goto err_get_unmap; - } txd->callback_result = ntb_rx_copy_callback; txd->callback_param = entry; @@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, struct dmaengine_unmap_data *unmap; dma_addr_t dest; dma_cookie_t cookie; - int retries = 0; device = chan->device; dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; @@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, unmap->to_cnt = 1; - for (retries = 0; retries < DMA_RETRIES; retries++) { - txd = device->device_prep_dma_memcpy(chan, dest, - unmap->addr[0], len, - DMA_PREP_INTERRUPT); - if (txd) - break; - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(DMA_OUT_RESOURCE_TO); - } - - if (!txd) { - qp->dma_tx_prep_err++; + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, + 
DMA_PREP_INTERRUPT); + if (!txd) goto err_get_unmap; - } txd->callback_result = ntb_tx_copy_callback; txd->callback_param = entry; From 86fdb3448cc1ffe0e9f55380f1410f1d12c35f95 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 17 Jun 2017 16:10:27 +0800 Subject: [PATCH 337/436] sctp: ensure ep is not destroyed before doing the dump Now before dumping a sock in sctp_diag, it only holds the sock while the ep may be already destroyed. It can cause a use-after-free panic when accessing ep->asocs. This patch is to set sctp_sk(sk)->ep NULL in sctp_endpoint_destroy, and check if this ep is already destroyed before dumping this ep. Suggested-by: Marcelo Ricardo Leitner Signed-off-by: Xin Long Acked-by: Neil Horman Signed-off-by: David S. Miller --- net/sctp/endpointola.c | 1 + net/sctp/sctp_diag.c | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 8c589230794f..3dcd0ecf3d99 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) if (sctp_sk(sk)->bind_hash) sctp_put_port(sk); + sctp_sk(sk)->ep = NULL; sock_put(sk); } diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 048954eee984..9a647214a91e 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -278,7 +278,6 @@ out: static int sctp_sock_dump(struct sock *sk, void *p) { - struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_comm_param *commp = p; struct sk_buff *skb = commp->skb; struct netlink_callback *cb = commp->cb; @@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p) int err = 0; lock_sock(sk); - list_for_each_entry(assoc, &ep->asocs, asocs) { + if (!sctp_sk(sk)->ep) + goto release; + list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) { if (cb->args[4] < cb->args[1]) goto next; From a8ae0a773d38b4b1d4566b0edcb6bb63f4a9d22f Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Mon, 19 Jun 2017 11:08:28 -0700 Subject: [PATCH 338/436] drm/i915: Don't enable backlight at setup time. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Maarten and Ville noticed that we are enabling backlight via DP aux very early in the modeset_init path via the intel_dp_aux_setup_backlight() function, since commit e7156c833903 ("drm/i915: Add Backlight Control using DPCD for eDP connectors (v9)"). Looks like all we need to do during _setup_backlight() is read the current brightness state instead of modifying it. v2: Rewrote commit message. 
Cc: Ville Syrjala Cc: Maarten Lankhorst Cc: Jani Nikula Cc: Yetunde Adebisi Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Maarten Lankhorst Acked-by: Jani Nikula Tested-by: Puthikorn Voravootivat Fixes: e7156c833903 ("drm/i915: Add Backlight Control using DPCD for eDP connectors (v9)") Link: http://patchwork.freedesktop.org/patch/msgid/1497384239-2965-1-git-send-email-dhinakaran.pandiyan@intel.com Signed-off-by: Ville Syrjälä (cherry picked from commit f6262bda462e81e959b80a96dac799bd9df27f73) Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1497895708-19422-1-git-send-email-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index 6532e226db29..40ba3134545e 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); struct intel_panel *panel = &connector->panel; - intel_dp_aux_enable_backlight(connector); - if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) panel->backlight.max = 0xFFFF; else From 6e88491cf2a3b17199c78bd53348b39dc6a88275 Mon Sep 17 00:00:00 2001 From: Junshan Fang Date: Thu, 15 Jun 2017 14:02:20 +0800 Subject: [PATCH 339/436] drm/amdgpu: add Polaris12 DID Signed-off-by: Junshan Fang Reviewed-by: Roger.He Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e6a75a..ab6b0d0febab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, From 4a072c71f49b0a0e495ea13423bdb850da73c58c Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 15 Jun 2017 00:45:26 +0200 Subject: [PATCH 340/436] random: silence compiler warnings and fix race Odd versions of gcc for the sh4 architecture will actually warn about flags being used while uninitialized, so we set them to zero. Non crazy gccs will optimize that out again, so it doesn't make a difference. Next, over aggressive gccs could inline the expression that defines use_lock, which could then introduce a race resulting in a lock imbalance. By using READ_ONCE, we prevent that fate. Finally, we make that assignment const, so that gcc can still optimize a nice amount. Finally, we fix a potential deadlock between primary_crng.lock and batched_entropy_reset_lock, where they could be called in opposite order. Moving the call to invalidate_batched_entropy to outside the lock rectifies this issue. Fixes: b169c13de473a85b3c859bb36216a4cb5f00a54a Signed-off-by: Jason A. 
Donenfeld Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org --- drivers/char/random.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index e870f329db88..01a260f67437 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len) p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; cp++; crng_init_cnt++; len--; } + spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { invalidate_batched_entropy(); crng_init = 1; wake_up_interruptible(&crng_init_wait); pr_notice("random: fast init done\n"); } - spin_unlock_irqrestore(&primary_crng.lock, flags); return 1; } @@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; + spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); crng_init = 2; @@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) wake_up_interruptible(&crng_init_wait); pr_notice("random: crng init done\n"); } - spin_unlock_irqrestore(&primary_crng.lock, flags); } static inline void crng_wait_ready(void) @@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); u64 get_random_u64(void) { u64 ret; - bool use_lock = crng_init < 2; - unsigned long flags; + bool use_lock = READ_ONCE(crng_init) < 2; + unsigned long flags = 0; struct batched_entropy *batch; #if BITS_PER_LONG == 64 @@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); u32 get_random_u32(void) { u32 ret; - bool use_lock = crng_init < 2; - unsigned long flags; + bool use_lock = READ_ONCE(crng_init) < 2; + unsigned long flags = 0; struct batched_entropy *batch; if (arch_get_random_int(&ret)) From 6ebf81536d3be327c4f5f59bae3b841d62322343 Mon Sep 17 00:00:00 2001 From: Manish Rangankar Date: Thu, 15 Jun 2017 00:10:39 -0700 Subject: [PATCH 341/436] scsi: qedi: Remove WARN_ON for untracked cleanup. Signed-off-by: Manish Rangankar Reviewed-by: Lee Duncan Signed-off-by: Martin K. Petersen --- drivers/scsi/qedi/qedi_fw.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 8bc7ee1a8ca8..507512cc478b 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, QEDI_ERR(&qedi->dbg_ctx, "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); - WARN_ON(1); } } From 02d94e04747c5df55410c7b19f3cf72a1a11899b Mon Sep 17 00:00:00 2001 From: Manish Rangankar Date: Thu, 15 Jun 2017 00:10:40 -0700 Subject: [PATCH 342/436] scsi: qedi: Remove WARN_ON from clear task context. Signed-off-by: Manish Rangankar Reviewed-by: Lee Duncan Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedi/qedi_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 09a294634bc7..879d3b7462f9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1499,11 +1499,9 @@ err_idx: void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) { - if (!test_and_clear_bit(idx, qedi->task_idx_map)) { + if (!test_and_clear_bit(idx, qedi->task_idx_map)) QEDI_ERR(&qedi->dbg_ctx, "FW task context, already cleared, tid=0x%x\n", idx); - WARN_ON(1); - } } void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, From 817ae460c784f32cd45e60b2b1b21378c3c6a847 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Mon, 19 Jun 2017 19:48:52 -0700 Subject: [PATCH 343/436] Input: i8042 - add Fujitsu Lifebook AH544 to notimeout list Without this quirk, the touchpad is not responsive on this product, with the following message repeated in the logs: psmouse serio1: bad data from KBC - timeout Add it to the notimeout list alongside other similar Fujitsu laptops. Signed-off-by: Daniel Drake Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/serio/i8042-x86ia64io.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 09720d950686..f932a83b4990 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), }, }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + }, { } }; From c7ecb9068e6772c43941ce609f08bc53f36e1dce Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 14 Jun 2017 07:37:14 +0200 Subject: [PATCH 344/436] ALSA: hda - Apply quirks to Broxton-T, too Broxton-T was a forgotten child and we didn't apply the quirks for Skylake+ properly. Meanwhile, a quirk for reducing the DMA latency seems specific to the early Broxton model, so we leave as is. 
Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e3c696c46a21..01eb1dc7b5b3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -370,11 +370,12 @@ enum { #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) +#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98) #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) -#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ - IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ - IS_GLK(pci) || IS_CFL(pci) +#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \ + IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \ + IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci)) static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", From 53145c2e354b5a5ed031cec7472b4f16bab060c7 Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Thu, 15 Jun 2017 13:35:50 +0100 Subject: [PATCH 345/436] Revert "HID: magicmouse: Set multi-touch keybits for Magic Mouse" Setting these bits causes libinput to fail to initialize the device; setting BTN_TOUCH and BTN_TOOL_FINGER causes it to treat the mouse as a touchpad, and it then refuses to continue when it discovers ABS_X is not set. This breaks all known Wayland compositors, as well as Xorg when the libinput driver is being used. This reverts commit f4b65b9563216b3e01a5cc844c3ba68901d9b195. Signed-off-by: Daniel Stone Cc: Che-Liang Chiou Cc: Thierry Escande Cc: Jiri Kosina Cc: Benjamin Tissoires Acked-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-magicmouse.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 1d6c997b3001..20b40ad26325 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev, if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { magicmouse_emit_buttons(msc, clicks & 3); - input_mt_report_pointer_emulation(input, true); input_report_rel(input, REL_X, x); input_report_rel(input, REL_Y, y); } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ @@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __clear_bit(BTN_RIGHT, input->keybit); __clear_bit(BTN_MIDDLE, input->keybit); __set_bit(BTN_MOUSE, input->keybit); + __set_bit(BTN_TOOL_FINGER, input->keybit); + __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); + __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); + __set_bit(BTN_TOOL_QUADTAP, input->keybit); + __set_bit(BTN_TOOL_QUINTTAP, input->keybit); + __set_bit(BTN_TOUCH, input->keybit); + __set_bit(INPUT_PROP_POINTER, input->propbit); __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); } - __set_bit(BTN_TOOL_FINGER, input->keybit); - __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); - __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); - __set_bit(BTN_TOOL_QUADTAP, input->keybit); - __set_bit(BTN_TOOL_QUINTTAP, input->keybit); - __set_bit(BTN_TOUCH, input->keybit); - __set_bit(INPUT_PROP_POINTER, input->propbit); __set_bit(EV_ABS, input->evbit); From 
ceea5e3771ed2378668455fa21861bead7504df5 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 8 Jun 2017 16:44:20 -0700 Subject: [PATCH 346/436] time: Fix clock->read(clock) race around clocksource changes In tests, which excercise switching of clocksources, a NULL pointer dereference can be observed on AMR64 platforms in the clocksource read() function: u64 clocksource_mmio_readl_down(struct clocksource *c) { return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } This is called from the core timekeeping code via: cycle_now = tkr->read(tkr->clock); tkr->read is the cached tkr->clock->read() function pointer. When the clocksource is changed then tkr->clock and tkr->read are updated sequentially. The code above results in a sequential load operation of tkr->read and tkr->clock as well. If the store to tkr->clock hits between the loads of tkr->read and tkr->clock, then the old read() function is called with the new clock pointer. As a consequence the read() function dereferences a different data structure and the resulting 'reg' pointer can point anywhere including NULL. This problem was introduced when the timekeeping code was switched over to use struct tk_read_base. Before that, it was theoretically possible as well when the compiler decided to reload clock in the code sequence: now = tk->clock->read(tk->clock); Add a helper function which avoids the issue by reading tk_read_base->clock once into a local variable clk and then issue the read function via clk->read(clk). This guarantees that the read() function always gets the proper clocksource pointer handed in. Since there is now no use for the tkr.read pointer, this patch also removes it, and to address stopping the fast timekeeper during suspend/resume, it introduces a dummy clocksource to use rather then just a dummy read function. Signed-off-by: John Stultz Acked-by: Ingo Molnar Cc: Prarit Bhargava Cc: Richard Cochran Cc: Stephen Boyd Cc: stable Cc: Miroslav Lichvar Cc: Daniel Mentz Link: http://lkml.kernel.org/r/1496965462-20003-2-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner --- include/linux/timekeeper_internal.h | 1 - kernel/time/timekeeping.c | 52 ++++++++++++++++++++--------- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 110f4532188c..e9834ada4d0c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -29,7 +29,6 @@ */ struct tk_read_base { struct clocksource *clock; - u64 (*read)(struct clocksource *cs); u64 mask; u64 cycle_last; u32 mult; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 9652bc57fd09..eff94cb8e89e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) tk->offs_boot = ktime_add(tk->offs_boot, delta); } +/* + * tk_clock_read - atomic clocksource read() helper + * + * This helper is necessary to use in the read paths because, while the + * seqlock ensures we don't return a bad value while structures are updated, + * it doesn't protect from potential crashes. There is the possibility that + * the tkr's clocksource may change between the read reference, and the + * clock reference passed to the read function. This can cause crashes if + * the wrong clocksource is passed to the wrong read function. 
+ * This isn't necessary to use when holding the timekeeper_lock or doing + * a read of the fast-timekeeper tkrs (which is protected by its own locking + * and update logic). + */ +static inline u64 tk_clock_read(struct tk_read_base *tkr) +{ + struct clocksource *clock = READ_ONCE(tkr->clock); + + return clock->read(clock); +} + #ifdef CONFIG_DEBUG_TIMEKEEPING #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ @@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) */ do { seq = read_seqcount_begin(&tk_core.seq); - now = tkr->read(tkr->clock); + now = tk_clock_read(tkr); last = tkr->cycle_last; mask = tkr->mask; max = tkr->clock->max_cycles; @@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) u64 cycle_now, delta; /* read clocksource */ - cycle_now = tkr->read(tkr->clock); + cycle_now = tk_clock_read(tkr); /* calculate the delta since the last update_wall_time */ delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); @@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) ++tk->cs_was_changed_seq; old_clock = tk->tkr_mono.clock; tk->tkr_mono.clock = clock; - tk->tkr_mono.read = clock->read; tk->tkr_mono.mask = clock->mask; - tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); + tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); tk->tkr_raw.clock = clock; - tk->tkr_raw.read = clock->read; tk->tkr_raw.mask = clock->mask; tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; @@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) now += timekeeping_delta_to_ns(tkr, clocksource_delta( - tkr->read(tkr->clock), + tk_clock_read(tkr), tkr->cycle_last, tkr->mask)); } while (read_seqcount_retry(&tkf->seq, seq)); @@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs) return cycles_at_suspend; } +static struct clocksource dummy_clock = { + .read = dummy_clock_read, +}; + /** * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. * @tk: Timekeeper to snapshot. 
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk) struct tk_read_base *tkr = &tk->tkr_mono; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - cycles_at_suspend = tkr->read(tkr->clock); - tkr_dummy.read = dummy_clock_read; + cycles_at_suspend = tk_clock_read(tkr); + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); tkr = &tk->tkr_raw; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - tkr_dummy.read = dummy_clock_read; + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); } @@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) */ static void timekeeping_forward_now(struct timekeeper *tk) { - struct clocksource *clock = tk->tkr_mono.clock; u64 cycle_now, delta; u64 nsec; - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); tk->tkr_mono.cycle_last = cycle_now; tk->tkr_raw.cycle_last = cycle_now; @@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) do { seq = read_seqcount_begin(&tk_core.seq); - - now = tk->tkr_mono.read(tk->tkr_mono.clock); + now = tk_clock_read(&tk->tkr_mono); systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; base_real = ktime_add(tk->tkr_mono.base, @@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn) * Check whether the system counter value provided by the * device driver is on the current timekeeping interval. */ - now = tk->tkr_mono.read(tk->tkr_mono.clock); + now = tk_clock_read(&tk->tkr_mono); interval_start = tk->tkr_mono.cycle_last; if (!cycle_between(interval_start, cycles, now)) { clock_was_set_seq = tk->clock_was_set_seq; @@ -1629,7 +1649,7 @@ void timekeeping_resume(void) * The less preferred source will only be tried if there is no better * usable source. The rtc part is handled separately in rtc core code. */ - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && cycle_now > tk->tkr_mono.cycle_last) { u64 nsec, cyc_delta; @@ -2030,7 +2050,7 @@ void update_wall_time(void) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = real_tk->cycle_interval; #else - offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), + offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), tk->tkr_mono.cycle_last, tk->tkr_mono.mask); #endif From 3d88d56c5873f6eebe23e05c3da701960146b801 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 8 Jun 2017 16:44:21 -0700 Subject: [PATCH 347/436] time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting Due to how the MONOTONIC_RAW accumulation logic was handled, there is the potential for a 1ns discontinuity when we do accumulations. This small discontinuity has for the most part gone un-noticed, but since ARM64 enabled CLOCK_MONOTONIC_RAW in their vDSO clock_gettime implementation, we've seen failures with the inconsistency-check test in kselftest. This patch addresses the issue by using the same sub-ns accumulation handling that CLOCK_MONOTONIC uses, which avoids the issue for in-kernel users. Since the ARM64 vDSO implementation has its own clock_gettime calculation logic, this patch reduces the frequency of errors, but failures are still seen. The ARM64 vDSO will need to be updated to include the sub-nanosecond xtime_nsec values in its calculation for this issue to be completely fixed. 
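To make the shifted-nanosecond bookkeeping concrete, here is a toy user-space sketch (illustrative only; the shift and per-interval values are invented and far smaller than a real clocksource's mult/shift pair) contrasting the old pre-truncated raw_interval accumulation with the carried sub-ns remainder this patch switches to:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t shift = 1;
	const uint64_t interval_snsec = 5;	/* 2.5 ns per interval, in shifted units */
	uint64_t old_ns = 0, new_snsec = 0;
	int i;

	for (i = 0; i < 10; i++) {
		old_ns += interval_snsec >> shift;	/* old: +2 ns, the 0.5 ns fraction is dropped */
		new_snsec += interval_snsec;		/* new: remainder carried in shifted units */
	}

	printf("old accumulation: %llu ns\n", (unsigned long long)old_ns);
	printf("new accumulation: %llu ns\n",
	       (unsigned long long)(new_snsec >> shift));
	return 0;
}

Over ten intervals of a true 2.5 ns each, the old style reports 20 ns while the carried-remainder style reports 25 ns; the fraction silently dropped at each accumulation (always under one nanosecond) is what surfaces as the small CLOCK_MONOTONIC_RAW inconsistencies described above.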
Signed-off-by: John Stultz Tested-by: Daniel Mentz Cc: Prarit Bhargava Cc: Kevin Brodsky Cc: Richard Cochran Cc: Stephen Boyd Cc: Will Deacon Cc: "stable #4 . 8+" Cc: Miroslav Lichvar Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner --- include/linux/timekeeper_internal.h | 4 ++-- kernel/time/timekeeping.c | 19 ++++++++++--------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index e9834ada4d0c..f7043ccca81c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,7 +57,7 @@ struct tk_read_base { * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -99,7 +99,7 @@ struct timekeeper { u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u32 raw_interval; + u64 raw_interval; /* The ntp_tick_length() value currently being used. * This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index eff94cb8e89e..b602c48cb841 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -280,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* Go back from cycles -> shifted ns */ tk->xtime_interval = interval * clock->mult; tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = (interval * clock->mult) >> clock->shift; + tk->raw_interval = interval * clock->mult; /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { @@ -1996,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, u32 shift, unsigned int *clock_set) { u64 interval = tk->cycle_interval << shift; - u64 raw_nsecs; + u64 snsec_per_sec; /* If the offset is smaller than a shifted interval, do nothing */ if (offset < interval) @@ -2011,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ - raw_nsecs = (u64)tk->raw_interval << shift; - raw_nsecs += tk->raw_time.tv_nsec; - if (raw_nsecs >= NSEC_PER_SEC) { - u64 raw_secs = raw_nsecs; - raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); - tk->raw_time.tv_sec += raw_secs; + tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; + snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { + tk->tkr_raw.xtime_nsec -= snsec_per_sec; + tk->raw_time.tv_sec++; } - tk->raw_time.tv_nsec = raw_nsecs; + tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; /* Accumulate error between NTP and clock interval */ tk->ntp_error += tk->ntp_tick << shift; From dbb236c1ceb697a559e0694ac4c9e7b9131d0b16 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 8 Jun 2017 16:44:22 -0700 Subject: [PATCH 348/436] arm64/vdso: Fix nsec handling for CLOCK_MONOTONIC_RAW Recently vDSO support for CLOCK_MONOTONIC_RAW was 
added in 49eea433b326 ("arm64: Add support for CLOCK_MONOTONIC_RAW in clock_gettime() vDSO"). Noticing that the core timekeeping code never set tkr_raw.xtime_nsec, the vDSO implementation didn't bother exposing it via the data page and instead took the unshifted tk->raw_time.tv_nsec value which was then immediately shifted left in the vDSO code. Unfortunately, by accellerating the MONOTONIC_RAW clockid, it uncovered potential 1ns time inconsistencies caused by the timekeeping core not handing sub-ns resolution. Now that the core code has been fixed and is actually setting tkr_raw.xtime_nsec, we need to take that into account in the vDSO by adding it to the shifted raw_time value, in order to fix the user-visible inconsistency. Rather than do that at each use (and expand the data page in the process), instead perform the shift/addition operation when populating the data page and remove the shift from the vDSO code entirely. [jstultz: minor whitespace tweak, tried to improve commit message to make it more clear this fixes a regression] Reported-by: John Stultz Signed-off-by: Will Deacon Signed-off-by: John Stultz Tested-by: Daniel Mentz Acked-by: Kevin Brodsky Cc: Prarit Bhargava Cc: Richard Cochran Cc: Stephen Boyd Cc: "stable #4 . 8+" Cc: Miroslav Lichvar Link: http://lkml.kernel.org/r/1496965462-20003-4-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner --- arch/arm64/kernel/vdso.c | 5 +++-- arch/arm64/kernel/vdso/gettimeofday.S | 1 - 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 41b6e31f8f55..d0cb007fa482 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk) /* tkr_mono.cycle_last == tkr_raw.cycle_last */ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; vdso_data->raw_time_sec = tk->raw_time.tv_sec; - vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; + vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec << + tk->tkr_raw.shift) + + tk->tkr_raw.xtime_nsec; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; - /* tkr_raw.xtime_nsec == 0 */ vdso_data->cs_mono_mult = tk->tkr_mono.mult; vdso_data->cs_raw_mult = tk->tkr_raw.mult; /* tkr_mono.shift == tkr_raw.shift */ diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index e00b4671bd7c..76320e920965 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -256,7 +256,6 @@ monotonic_raw: seqcnt_check fail=monotonic_raw /* All computations are done with left-shifted nsecs. */ - lsl x14, x14, x12 get_nsec_per_sec res=x9 lsl x9, x9, x12 From 842c08846420baa619fe3cb8c9af538efdb89428 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 14 Jun 2017 10:54:52 +0200 Subject: [PATCH 349/436] livepatch: Fix stacking of patches with respect to RCU rcu_read_(un)lock(), list_*_rcu(), and synchronize_rcu() are used for a secure access and manipulation of the list of patches that modify the same function. In particular, it is the variable func_stack that is accessible from the ftrace handler via struct ftrace_ops and klp_ops. Of course, it synchronizes also some states of the patch on the top of the stack, e.g. func->transition in klp_ftrace_handler. At the same time, this mechanism guards also the manipulation of task->patch_state. It is modified according to the state of the transition and the state of the process. 
Now, all this works well as long as RCU works well. Sadly livepatching might get into some corner cases when this is not true. For example, RCU is not watching when rcu_read_lock() is taken in idle threads. It is because they might sleep and prevent reaching the grace period for too long. There are ways how to make RCU watching even in idle threads, see rcu_irq_enter(). But there is a small location inside RCU infrastructure when even this does not work. This small problematic location can be detected either before calling rcu_irq_enter() by rcu_irq_enter_disabled() or later by rcu_is_watching(). Sadly, there is no safe way how to handle it. Once we detect that RCU was not watching, we might see inconsistent state of the function stack and the related variables in klp_ftrace_handler(). Then we could do a wrong decision, use an incompatible implementation of the function and break the consistency of the system. We could warn but we could not avoid the damage. Fortunately, ftrace has similar problems and they seem to be solved well there. It uses a heavy weight implementation of some RCU operations. In particular, it replaces: + rcu_read_lock() with preempt_disable_notrace() + rcu_read_unlock() with preempt_enable_notrace() + synchronize_rcu() with schedule_on_each_cpu(sync_work) My understanding is that this is RCU implementation from a stone age. It meets the core RCU requirements but it is rather ineffective. Especially, it does not allow to batch or speed up the synchronize calls. On the other hand, it is very trivial. It allows to safely trace and/or livepatch even the RCU core infrastructure. And the effectiveness is a not a big issue because using ftrace or livepatches on productive systems is a rare operation. The safety is much more important than a negligible extra load. Note that the alternative implementation follows the RCU principles. Therefore, we could and actually must use list_*_rcu() variants when manipulating the func_stack. These functions allow to access the pointers in the right order and with the right barriers. But they do not use any other information that would be set only by rcu_read_lock(). Also note that there are actually two problems solved in ftrace: First, it cares about the consistency of RCU read sections. It is being solved the way as described and used in this patch. Second, ftrace needs to make sure that nobody is inside the dynamic trampoline when it is being freed. For this, it also calls synchronize_rcu_tasks() in preemptive kernel in ftrace_shutdown(). Livepatch has similar problem but it is solved by ftrace for free. klp_ftrace_handler() is a good guy and never sleeps. In addition, it is registered with FTRACE_OPS_FL_DYNAMIC. It causes that unregister_ftrace_function() calls: * schedule_on_each_cpu(ftrace_sync) - always * synchronize_rcu_tasks() - in preemptive kernel The effect is that nobody is neither inside the dynamic trampoline nor inside the ftrace handler after unregister_ftrace_function() returns. 
[jkosina@suse.cz: reformat changelog, fix comment] Signed-off-by: Petr Mladek Acked-by: Josh Poimboeuf Acked-by: Miroslav Benes Signed-off-by: Jiri Kosina --- kernel/livepatch/patch.c | 8 ++++++-- kernel/livepatch/transition.c | 36 ++++++++++++++++++++++++++++++----- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index f8269036bf0b..52c4e907c14b 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip, ops = container_of(fops, struct klp_ops, fops); - rcu_read_lock(); + /* + * A variant of synchronize_sched() is used to allow patching functions + * where RCU is not watching, see klp_synchronize_transition(). + */ + preempt_disable_notrace(); func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node); @@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip, klp_arch_set_pc(regs, (unsigned long)func->new_func); unlock: - rcu_read_unlock(); + preempt_enable_notrace(); } /* diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index adc0cc64aa4b..b004a1fb6032 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work) } static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); +/* + * This function is just a stub to implement a hard force + * of synchronize_sched(). This requires synchronizing + * tasks even in userspace and idle. + */ +static void klp_sync(struct work_struct *work) +{ +} + +/* + * We allow to patch also functions where RCU is not watching, + * e.g. before user_exit(). We can not rely on the RCU infrastructure + * to do the synchronization. Instead hard force the sched synchronization. + * + * This approach allows to use RCU functions for manipulating func_stack + * safely. + */ +static void klp_synchronize_transition(void) +{ + schedule_on_each_cpu(klp_sync); +} + /* * The transition to the target patch state is complete. Clean up the data * structures. @@ -73,7 +95,7 @@ static void klp_complete_transition(void) * func->transition gets cleared, the handler may choose a * removed function. */ - synchronize_rcu(); + klp_synchronize_transition(); } if (klp_transition_patch->immediate) @@ -92,7 +114,7 @@ static void klp_complete_transition(void) /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ if (klp_target_state == KLP_PATCHED) - synchronize_rcu(); + klp_synchronize_transition(); read_lock(&tasklist_lock); for_each_process_thread(g, task) { @@ -136,7 +158,11 @@ void klp_cancel_transition(void) */ void klp_update_patch_state(struct task_struct *task) { - rcu_read_lock(); + /* + * A variant of synchronize_sched() is used to allow patching functions + * where RCU is not watching, see klp_synchronize_transition(). 
+ */ + preempt_disable_notrace(); /* * This test_and_clear_tsk_thread_flag() call also serves as a read @@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task) if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) task->patch_state = READ_ONCE(klp_target_state); - rcu_read_unlock(); + preempt_enable_notrace(); } /* @@ -539,7 +565,7 @@ void klp_reverse_transition(void) clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); /* Let any remaining calls to klp_update_patch_state() complete */ - synchronize_rcu(); + klp_synchronize_transition(); klp_start_transition(); } From 6c7515c61ffa0985c57abd8892c7928b52b9a306 Mon Sep 17 00:00:00 2001 From: Ralph Sennhauser Date: Thu, 1 Jun 2017 22:08:20 +0200 Subject: [PATCH 350/436] gpio: mvebu: change compatible string for PWM support As it turns out more than just Armada 370 and XP support using GPIO lines as PWM lines. For example the Armada 38x family has the same hardware support. As such "marvell,armada-370-xp-gpio" for the compatible string is a misnomer. Change the compatible string to "marvell,armada-370-gpio" before the driver makes it out of the -rc stage. This also follows the practice of using only the first device family supported as part of the name. Also update the documentation and comments in the code accordingly. Fixes: 757642f9a584 ("gpio: mvebu: Add limited PWM support") Signed-off-by: Ralph Sennhauser Acked-by: Gregory CLEMENT Acked-by: Rob Herring Signed-off-by: Linus Walleij --- Documentation/devicetree/bindings/gpio/gpio-mvebu.txt | 6 +++--- drivers/gpio/gpio-mvebu.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt index 42c3bb2d53e8..01e331a5f3e7 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt @@ -41,9 +41,9 @@ Required properties: Optional properties: In order to use the GPIO lines in PWM mode, some additional optional -properties are required. Only Armada 370 and XP support these properties. +properties are required. -- compatible: Must contain "marvell,armada-370-xp-gpio" +- compatible: Must contain "marvell,armada-370-gpio" - reg: an additional register set is needed, for the GPIO Blink Counter on/off registers. 
@@ -71,7 +71,7 @@ Example: }; gpio1: gpio@18140 { - compatible = "marvell,armada-370-xp-gpio"; + compatible = "marvell,armada-370-gpio"; reg = <0x18140 0x40>, <0x181c8 0x08>; reg-names = "gpio", "pwm"; ngpios = <17>; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 5104b6398139..c83ea68be792 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev, u32 set; if (!of_device_is_compatible(mvchip->chip.of_node, - "marvell,armada-370-xp-gpio")) + "marvell,armada-370-gpio")) return 0; if (IS_ERR(mvchip->clk)) @@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = { .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, }, { - .compatible = "marvell,armada-370-xp-gpio", + .compatible = "marvell,armada-370-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, }, { @@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip); } - /* Armada 370/XP has simple PWM support for GPIO lines */ + /* Some MVEBU SoCs have simple PWM support for GPIO lines */ if (IS_ENABLED(CONFIG_PWM)) return mvebu_pwm_probe(pdev, mvchip, id); From e27a9eca5d4a392b96ce5d5238c8d637bcb0a52c Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Tue, 20 Jun 2017 10:57:51 +0100 Subject: [PATCH 351/436] KVM: MIPS: Fix maybe-uninitialized build failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit fixes a "maybe-uninitialized" build failure in arch/mips/kvm/tlb.c when KVM, DYNAMIC_DEBUG and JUMP_LABEL are all enabled. The failure is: In file included from ./include/linux/printk.h:329:0, from ./include/linux/kernel.h:13, from ./include/asm-generic/bug.h:15, from ./arch/mips/include/asm/bug.h:41, from ./include/linux/bug.h:4, from ./include/linux/thread_info.h:11, from ./include/asm-generic/current.h:4, from ./arch/mips/include/generated/asm/current.h:1, from ./include/linux/sched.h:11, from arch/mips/kvm/tlb.c:13: arch/mips/kvm/tlb.c: In function ‘kvm_mips_host_tlb_inv’: ./include/linux/dynamic_debug.h:126:3: error: ‘idx_kernel’ may be used uninitialized in this function [-Werror=maybe-uninitialized] __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ ^~~~~~~~~~~~~~~~~~ arch/mips/kvm/tlb.c:169:16: note: ‘idx_kernel’ was declared here int idx_user, idx_kernel; ^~~~~~~~~~ There is a similar error relating to "idx_user". Both errors were observed with GCC 6. As far as I can tell, it is impossible for either idx_user or idx_kernel to be uninitialized when they are later read in the calls to kvm_debug, but to satisfy the compiler, add zero initializers to both variables. Signed-off-by: James Cowgill Fixes: 57e3869cfaae ("KVM: MIPS/TLB: Generalise host TLB invalidate to kernel ASID") Cc: # 4.11+ Acked-by: James Hogan Signed-off-by: Radim Krčmář --- arch/mips/kvm/tlb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 7c6336dd2638..7cd92166a0b9 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi) int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, bool user, bool kernel) { - int idx_user, idx_kernel; + /* + * Initialize idx_user and idx_kernel to workaround bogus + * maybe-initialized warning when using GCC 6. 
+ */ + int idx_user = 0, idx_kernel = 0; unsigned long flags, old_entryhi; local_irq_save(flags); From 9e69672e90ccff10dab6f0c9545226a886e5973c Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Wed, 14 Jun 2017 17:13:14 +0200 Subject: [PATCH 352/436] dt-bindings: mfd: Update STM32 timers clock names Clock name has been updated during driver/DT binding review: https://lkml.org/lkml/2016/12/13/718 Update DT binding doc to reflect this. Fixes: 8f9359c6c6a0 (dt-bindings: mfd: Add bindings for STM32 Timers driver) Signed-off-by: Fabrice Gasnier Acked-by: Benjamin Gaignard Signed-off-by: Lee Jones --- Documentation/devicetree/bindings/mfd/stm32-timers.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt index bbd083f5600a..1db6e0057a63 100644 --- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt +++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt @@ -31,7 +31,7 @@ Example: compatible = "st,stm32-timers"; reg = <0x40010000 0x400>; clocks = <&rcc 0 160>; - clock-names = "clk_int"; + clock-names = "int"; pwm { compatible = "st,stm32-pwm"; From 05b4017b37f1fce4b7185f138126dd8decdb381f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Jun 2017 10:55:11 -0400 Subject: [PATCH 353/436] drm/amdgpu/atom: fix ps allocation size for EnableDispPowerGating MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We were using the wrong structure which led to an overflow on some boards. bug: https://bugs.freedesktop.org/show_bug.cgi?id=101387 Acked-by: Chunming Zhou Acked-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 8c9bc75a9c2d..8a0818b23ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); @@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) { int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); From 52b482b0f4fd6d5267faf29fe91398e203f3c230 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Jun 2017 11:12:28 -0400 Subject: [PATCH 354/436] drm/amdgpu: adjust default display clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Increase the default display clock on newer asics to accommodate some high res modes with really high refresh rates.
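For reference, the dispclk fields involved are kept in units of 10 kHz, which is why the diff below divides by 100 when printing MHz; a minimal illustration only, not part of the patch:

        /* illustrative: clock values are stored in 10 kHz units */
        adev->clock.default_dispclk = 62500;                            /* 625 MHz */
        DRM_INFO("dispclk: %dMhz\n", adev->clock.default_dispclk / 100); /* prints 625 */
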
bug: https://bugs.freedesktop.org/show_bug.cgi?id=93826 Acked-by: Chunming Zhou Acked-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1cf78f4dd339..1e8e1123ddf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; + } else if (adev->clock.default_dispclk <= 60000) { + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); From 4eb59793cca00b0e629b6d55b5abb5acb82c5868 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Jun 2017 12:52:47 -0400 Subject: [PATCH 355/436] drm/radeon: add a PX quirk for another K53TK variant Disable PX on these systems. bug: https://bugs.freedesktop.org/show_bug.cgi?id=101491 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_device.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..0a6444d72000 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, /* macbook pro 8.2 */ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, From acfd6ee4fa7ebeee75511825fe02be3f7ac1d668 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Jun 2017 15:59:58 -0400 Subject: [PATCH 356/436] drm/radeon: add a quirk for Toshiba Satellite L20-183 Fixes resume from suspend. bug: https://bugzilla.kernel.org/show_bug.cgi?id=196121 Reported-by: Przemek Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_combios.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x280a) return; + /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume + * - it hangs on resume inside the dynclk 1 table. 
+ */ + if (rdev->family == CHIP_RS400 && + rdev->pdev->subsystem_vendor == 0x1179 && + rdev->pdev->subsystem_device == 0xff31) + return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); From bdaf32c3ced3d111b692f0af585f880f82d686c5 Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Fri, 16 Jun 2017 15:44:47 +0300 Subject: [PATCH 357/436] fib_rules: Resolve goto rules target on delete We should avoid marking goto rules unresolved when their target is actually reachable after rule deletion. Consider the following sample scenario: # ip -4 ru sh 0: from all lookup local 32000: from all goto 32100 32100: from all lookup main 32100: from all lookup default 32766: from all lookup main 32767: from all lookup default # ip -4 ru del pref 32100 table main # ip -4 ru sh 0: from all lookup local 32000: from all goto 32100 [unresolved] 32100: from all lookup default 32766: from all lookup main 32767: from all lookup default After removal of the first rule with preference 32100 we mark all goto rules as unreachable, even when a rule with the same preference as the removed one is still present. Check if the next rule with the same preference is available and make all rules with a goto action point to it. Signed-off-by: Serhey Popovych Signed-off-by: David S. Miller --- net/core/fib_rules.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index f21c4d3aeae0..3bba291c6c32 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; - struct fib_rule *rule, *tmp; + struct fib_rule *rule, *r; struct nlattr *tb[FRA_MAX+1]; struct fib_kuid_range range; int err = -EINVAL; @@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, /* * Check if this rule is a target to any of them. If so, + * adjust to the next one with the same preference or * disable them. As this operation is eventually very - * expensive, it is only performed if goto rules have - * actually been added. + * expensive, it is only performed if goto rules, except + * current if it is goto rule, have actually been added. */ if (ops->nr_goto_rules > 0) { - list_for_each_entry(tmp, &ops->rules_list, list) { - if (rtnl_dereference(tmp->ctarget) == rule) { - RCU_INIT_POINTER(tmp->ctarget, NULL); + struct fib_rule *n; + + n = list_next_entry(rule, list); + if (&n->list == &ops->rules_list || n->pref != rule->pref) + n = NULL; + list_for_each_entry(r, &ops->rules_list, list) { + if (rtnl_dereference(r->ctarget) != rule) + continue; + rcu_assign_pointer(r->ctarget, n); + if (!n) ops->unresolved_rules++; - } } } From fe420d87bbc234015b4195dd239b7d3052b140ea Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Fri, 16 Jun 2017 19:24:00 +0200 Subject: [PATCH 358/436] net/core: remove explicit do_softirq() from busy_poll_stop() Since commit 217f69743681 ("net: busy-poll: allow preemption in sk_busy_loop()") there is an explicit do_softirq() invocation after local_bh_enable() has been invoked. I don't understand why we need this because local_bh_enable() will invoke do_softirq() once the softirq counter reaches zero and we have softirq-related work pending. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S.
Miller --- net/core/dev.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 6d60149287a1..7243421c9783 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) if (rc == BUSY_POLL_BUDGET) __napi_schedule(napi); local_bh_enable(); - if (local_softirq_pending()) - do_softirq(); } void napi_busy_loop(unsigned int napi_id, From 5567e989198b5a8d78f9b5868e48fc9f4726bdd5 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 19 Jun 2017 18:04:16 +0300 Subject: [PATCH 359/436] fsl/fman: propagate dma_ops Make sure dma_ops are set, to be later used by the Ethernet driver. Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/mac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 0b31f8502ada..6e67d22fd0d5 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, goto no_mem; } + set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); + ret = platform_device_add_data(pdev, &data, sizeof(data)); if (ret) goto err; From fb52728a9294d97de808795b8e3f60fb8de50c00 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 19 Jun 2017 18:04:17 +0300 Subject: [PATCH 360/436] dpaa_eth: reuse the dma_ops provided by the FMan MAC device Remove the use of arch_setup_dma_ops() that was not exported and was breaking loadable module compilation. Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 9a520e4f0df9..290ad0563320 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ /* device used for DMA mapping */ - arch_setup_dma_ops(dev, 0, 0, NULL, false); + set_dma_ops(dev, get_dma_ops(&pdev->dev)); err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); if (err) { dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); From 7e113321eccba2b52c0e9d11129d370c9511e4db Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 19 Jun 2017 18:05:41 +0200 Subject: [PATCH 361/436] dt-bindings: net: sms911x: Add missing optional VDD regulators The lan911x family of devices require supplying from 3.3 V power supplies (connected to VDD_IO, VDD_A and VREG_3.3 pins). The existing driver however obtains only VDD_IO and VDD_A regulators in an optional way so document this in bindings. Signed-off-by: Krzysztof Kozlowski Reviewed-by: Linus Walleij Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/smsc911x.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt index 16c3a9501f5d..acfafc8e143c 100644 --- a/Documentation/devicetree/bindings/net/smsc911x.txt +++ b/Documentation/devicetree/bindings/net/smsc911x.txt @@ -27,6 +27,7 @@ Optional properties: of the device. 
On many systems this is wired high so the device goes out of reset at power-on, but if it is under program control, this optional GPIO can wake up in response to it. +- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies Examples: From 07f615574f8ac499875b21c1142f26308234a92c Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Tue, 20 Jun 2017 13:29:25 +0300 Subject: [PATCH 362/436] ipv6: Do not leak throw route references While commit 73ba57bfae4a ("ipv6: fix backtracking for throw routes") does a good job of propagating errors to fib_rules_lookup() in the fib rules core framework, which also corrects throw route handling, it does not solve the route reference leak that happens when we return -EAGAIN to fib_rules_lookup() and leave a routing table entry referenced in arg->result. If the rule with the matched throw route isn't the last match in the list, we overwrite arg->result and lose the reference on the previously stored throw route forever. We also partially revert commit ab997ad40839 ("ipv6: fix the incorrect return value of throw route") since we never return a routing table entry with dst.error == -EAGAIN when CONFIG_IPV6_MULTIPLE_TABLES is on. Also there is no point in checking for the RTF_REJECT flag since it is always set on throw routes. Fixes: 73ba57bfae4a ("ipv6: fix backtracking for throw routes") Signed-off-by: Serhey Popovych Signed-off-by: David S. Miller --- net/ipv6/fib6_rules.c | 22 ++++++----------------- net/ipv6/ip6_fib.c | 3 +-- 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index eea23b57c6a5..ec849d88a662 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -32,7 +32,6 @@ struct fib6_rule { struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { - struct rt6_info *rt; struct fib_lookup_arg arg = { .lookup_ptr = lookup, .flags = FIB_LOOKUP_NOREF, @@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, fib_rules_lookup(net->ipv6.fib6_rules_ops, flowi6_to_flowi(fl6), flags, &arg); - rt = arg.result; + if (arg.result) + return arg.result; - if (!rt) { - dst_hold(&net->ipv6.ip6_null_entry->dst); - return &net->ipv6.ip6_null_entry->dst; - } - - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { - ip6_rt_put(rt); - rt = net->ipv6.ip6_null_entry; - dst_hold(&rt->dst); - } - - return &rt->dst; + dst_hold(&net->ipv6.ip6_null_entry->dst); + return &net->ipv6.ip6_null_entry->dst; } static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, @@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, flp6->saddr = saddr; } err = rt->dst.error; - goto out; + if (err != -EAGAIN) + goto out; } again: ip6_rt_put(rt); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index d4bf2c68a545..e6b78ba0e636 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, struct rt6_info *rt; rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { + if (rt->dst.error == -EAGAIN) { ip6_rt_put(rt); rt = net->ipv6.ip6_null_entry; dst_hold(&rt->dst); From db833d40ad3263b2ee3b59a1ba168bb3cfed8137 Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Tue, 20 Jun 2017 14:35:23 +0300 Subject: [PATCH 363/436] rtnetlink: add IFLA_GROUP to ifla_policy Support for network interface groups was added a while ago; however, there is no IFLA_GROUP attribute
description in policy and netlink message size calculations until now. Add IFLA_GROUP attribute to the policy. Fixes: cbda10fa97d7 ("net_device: add support for network device groups") Signed-off-by: Serhey Popovych Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5e61456f6bc7..467a2f4510a7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ + nla_total_size(4) /* IFLA_LINK_NETNSID */ + + nla_total_size(4) /* IFLA_GROUP */ + nla_total_size(ext_filter_mask & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ @@ -1468,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, [IFLA_XDP] = { .type = NLA_NESTED }, + [IFLA_GROUP] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { From 57f0c9cf58ff7fe479137ab847a886d0eed3ad1d Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 20 Jun 2017 13:08:51 +0100 Subject: [PATCH 364/436] sfc: remove duplicate up_write on VF filter_sem Somehow two copies of the line 'up_write(&vf->efx->filter_sem);' got into efx_ef10_sriov_set_vf_vlan(). This would put the mutex in a bad state and cause all subsequent down attempts to hang. Fixes: 671b53eec2ed ("sfc: Ensure down_write(&filter_sem) and up_write() are matched before calling efx_net_open()") Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10_sriov.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index b7e4345c990d..019cef1d3cf7 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -661,8 +661,6 @@ restore_filters: up_write(&vf->efx->filter_sem); mutex_unlock(&vf->efx->mac_lock); - up_write(&vf->efx->filter_sem); - rc2 = efx_net_open(vf->efx->net_dev); if (rc2) goto reset_nic; From 05cf0d1bf4ed722aefff92775244dbe9e1bb4679 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 20 Jun 2017 14:32:41 +0200 Subject: [PATCH 365/436] net: stmmac: free an skb first when there are no longer any descriptors using it When having the skb pointer in the first descriptor, stmmac_tx_clean can get called at a moment where the IP has only cleared the own bit of the first descriptor, thus freeing the skb, even though there can be several descriptors whose buffers point into the same skb. By simply moving the skb pointer from the first descriptor to the last descriptor, a skb will get freed only when the IP has cleared the own bit of all the descriptors that are using that skb. Signed-off-by: Niklas Cassel Signed-off-by: David S. 
Miller --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d16d11bfc046..6e4cbc6ce0ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); - tx_q->tx_skbuff[first_entry] = skb; first->des0 = cpu_to_le32(des); @@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; + /* Only the last descriptor gets to point to the skb. */ + tx_q->tx_skbuff[tx_q->cur_tx] = skb; + + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { @@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; - tx_q->tx_skbuff[first_entry] = skb; - enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) @@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) skb->len); } - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + /* Only the last descriptor gets to point to the skb. */ + tx_q->tx_skbuff[entry] = skb; + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); tx_q->cur_tx = entry; if (netif_msg_pktdata(priv)) { From 9f93d87cba63e3d18629261243b1f633519eabb5 Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Fri, 9 Jun 2017 09:04:05 +0200 Subject: [PATCH 366/436] irqchip/mips-gic: Mark count and compare accessors notrace gic_read_count(), gic_write_compare() and gic_write_cpu_compare() are often used in a sequence to update the compare register with a count value increased by a small offset. With small delta values used to update the compare register, the time to update function trace for these operations may be longer than the update timeout leading to update failure. 
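To illustrate why these helpers must not be traced, a simplified sketch of the kind of sequence involved (not the actual clockevent handler, variable names assumed):

        /* program the next timer interrupt a small delta into the future */
        u64 cnt = gic_read_count();                     /* ftrace entry/exit adds latency here */

        gic_write_cpu_compare(cnt + delta, cpu);        /* ...and here */

        /* if tracing delayed us past cnt + delta, the compare value is already
         * in the past and the update is reported as failed
         */
        if (gic_read_count() >= cnt + delta)
                return -ETIME;
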
Signed-off-by: Marcin Nowakowski Signed-off-by: Thomas Gleixner Cc: Marc Zyngier Cc: linux-mips@linux-mips.org Cc: Jason Cooper Link: http://lkml.kernel.org/r/1496991845-27031-1-git-send-email-marcin.nowakowski@imgtec.com --- drivers/irqchip/irq-mips-gic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index eb7fbe159963..929f8558bf1c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) } #ifdef CONFIG_CLKSRC_MIPS_GIC -u64 gic_read_count(void) +u64 notrace gic_read_count(void) { unsigned int hi, hi2, lo; @@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void) return bits; } -void gic_write_compare(u64 cnt) +void notrace gic_write_compare(u64 cnt) { if (mips_cm_is64) { gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); @@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt) } } -void gic_write_cpu_compare(u64 cnt, int cpu) +void notrace gic_write_cpu_compare(u64 cnt, int cpu) { unsigned long flags; From b4846fc3c8559649277e3e4e6b5cec5348a8d208 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 20 Jun 2017 10:46:27 -0700 Subject: [PATCH 367/436] igmp: add a missing spin_lock_init() Andrey reported a lockdep warning on non-initialized spinlock: INFO: trying to register non-static key. the code is fine but needs lockdep annotation. turning off the locking correctness validator. CPU: 1 PID: 4099 Comm: a.out Not tainted 4.12.0-rc6+ #9 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 register_lock_class+0x717/0x1aa0 kernel/locking/lockdep.c:755 ? 0xffffffffa0000000 __lock_acquire+0x269/0x3690 kernel/locking/lockdep.c:3255 lock_acquire+0x22d/0x560 kernel/locking/lockdep.c:3855 __raw_spin_lock_bh ./include/linux/spinlock_api_smp.h:135 _raw_spin_lock_bh+0x36/0x50 kernel/locking/spinlock.c:175 spin_lock_bh ./include/linux/spinlock.h:304 ip_mc_clear_src+0x27/0x1e0 net/ipv4/igmp.c:2076 igmpv3_clear_delrec+0xee/0x4f0 net/ipv4/igmp.c:1194 ip_mc_destroy_dev+0x4e/0x190 net/ipv4/igmp.c:1736 We miss a spin_lock_init() in igmpv3_add_delrec(), probably because previously we never use it on this code path. Since we already unlink it from the global mc_tomb list, it is probably safe not to acquire this spinlock here. It does not harm to have it although, to avoid conditional locking. Fixes: c38b7d327aaf ("igmp: acquire pmc lock for ip_mc_clear_src()") Reported-by: Andrey Konovalov Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/ipv4/igmp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 8f6b5bbcbf69..ec9a396fa466 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); if (!pmc) return; + spin_lock_init(&pmc->lock); spin_lock_bh(&im->lock); pmc->interface = im->interface; in_dev_hold(in_dev); From 8a7b0d8e8d9962ec3b2ae64dd4e86d68a6fb9220 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 May 2017 08:30:40 +0300 Subject: [PATCH 368/436] CIFS: Set ->should_dirty in cifs_user_readv() The current code causes a static checker warning because ITER_IOVEC is zero so the condition is never true. 
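In short, with ITER_IOVEC defined as 0 the bitwise test can never fire, while an equality test can; side by side (mirroring the one-line fix below):

        if (to->type & ITER_IOVEC)      /* ITER_IOVEC is 0, so x & 0 is always 0: never true */
                ctx->should_dirty = true;

        if (to->type == ITER_IOVEC)     /* the fix: compare the value instead */
                ctx->should_dirty = true;
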
Fixes: 6685c5e2d1ac ("CIFS: Add asynchronous read support through kernel AIO") Signed-off-by: Dan Carpenter Signed-off-by: Steve French --- fs/cifs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0fd081bd2a2f..fcef70602b27 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) if (!is_sync_kiocb(iocb)) ctx->iocb = iocb; - if (to->type & ITER_IOVEC) + if (to->type == ITER_IOVEC) ctx->should_dirty = true; rc = setup_aio_ctx_iter(ctx, to, READ); From ecf3411a121e7a653e309ff50a820ffa87c537f8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 17 May 2017 19:24:15 +0100 Subject: [PATCH 369/436] CIFS: check if pages is null rather than bv for a failed allocation pages is being allocated however a null check on bv is being used to see if the allocation failed. Fix this by checking if pages is null. Detected by CoverityScan, CID#1432974 ("Logically dead code") Fixes: ccf7f4088af2dd ("CIFS: Add asynchronous context to support kernel AIO") Signed-off-by: Colin Ian King Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/misc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index b08531977daa..3b147dc6af63 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) if (!pages) { pages = vmalloc(max_pages * sizeof(struct page *)); - if (!bv) { + if (!pages) { kvfree(bv); return -ENOMEM; } From dcd87838c06f05ab7650b249ebf0d5b57ae63e1e Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 6 Jun 2017 16:58:58 -0700 Subject: [PATCH 370/436] CIFS: Improve readdir verbosity Downgrade the loglevel for SMB2 to prevent filling the log with messages if e.g. readdir was interrupted. Also make SMB2 and SMB1 codepaths do the same logging during readdir. 
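For context (general CIFS logging behaviour, not spelled out in this patch): cifs_dbg(VFS, ...) is always emitted as an error, while cifs_dbg(FYI, ...) only appears when CIFS debugging (cifsFYI) is enabled, so an interrupted readdir no longer floods the log:

        cifs_dbg(VFS, "open dir failed\n");             /* before: always logged */
        cifs_dbg(FYI, "open dir failed rc=%d\n", rc);   /* after: only with cifsFYI enabled */
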
Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French CC: Stable --- fs/cifs/smb1ops.c | 9 +++++++-- fs/cifs/smb2ops.c | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 27bc360c7ffd..a723df3e0197 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { - return CIFSFindFirst(xid, tcon, path, cifs_sb, - &fid->netfid, search_flags, srch_inf, true); + int rc; + + rc = CIFSFindFirst(xid, tcon, path, cifs_sb, + &fid->netfid, search_flags, srch_inf, true); + if (rc) + cifs_dbg(FYI, "find first failed=%d\n", rc); + return rc; } static int diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index c58691834eb2..59726013375b 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); kfree(utf16_path); if (rc) { - cifs_dbg(VFS, "open dir failed\n"); + cifs_dbg(FYI, "open dir failed rc=%d\n", rc); return rc; } @@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, fid->volatile_fid, 0, srch_inf); if (rc) { - cifs_dbg(VFS, "query directory failed\n"); + cifs_dbg(FYI, "query directory failed rc=%d\n", rc); SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); } return rc; From e125f5284f81bbb765a504494622b45c02faf978 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 7 Jun 2017 00:33:45 +0100 Subject: [PATCH 371/436] cifs: remove redundant return in cifs_creation_time_get There is a redundant return in function cifs_creation_time_get that appears to be old vestigial code than can be removed. So remove it. Detected by CoverityScan, CID#1361924 ("Structurally dead code") Signed-off-by: Colin Ian King Signed-off-by: Steve French --- fs/cifs/xattr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 3cb5c9e2d4e7..de50e749ff05 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode, pcreatetime = (__u64 *)value; *pcreatetime = CIFS_I(inode)->createtime; return sizeof(__u64); - - return rc; } From 517a6e43c4872c89794af5b377fa085e47345952 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 11 Jun 2017 09:12:47 +0200 Subject: [PATCH 372/436] CIFS: Fix some return values in case of error in 'crypt_message' 'rc' is known to be 0 at this point. So if 'init_sg' or 'kzalloc' fails, we should return -ENOMEM instead. Also remove a useless 'rc' in a debug message as it is meaningless here. 
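Sketched briefly, the problem is that these error paths were reached with 'rc' still 0, so the caller saw success for a failed allocation (simplified, assuming the surrounding function shape):

        int rc = 0;                     /* still 0 after the earlier successful steps */

        sg = init_sg(rqst, sign);
        if (!sg)
                goto free_req;          /* before the fix: returns rc == 0, i.e. "success" */
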
Fixes: 026e93dc0a3ee ("CIFS: Encrypt SMB3 requests before sending") Signed-off-by: Christophe JAILLET Reviewed-by: Pavel Shilovsky Reviewed-by: Aurelien Aptel Signed-off-by: Steve French CC: Stable --- fs/cifs/smb2ops.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 59726013375b..7e48561abd29 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) sg = init_sg(rqst, sign); if (!sg) { - cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); + cifs_dbg(VFS, "%s: Failed to init sg", __func__); + rc = -ENOMEM; goto free_req; } @@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) iv = kzalloc(iv_len, GFP_KERNEL); if (!iv) { cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); + rc = -ENOMEM; goto free_sg; } iv[0] = 3; From e94ac3510b6a0f696f2c442c4fc4051c8101ef12 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 20 Jun 2017 22:28:37 +0200 Subject: [PATCH 373/436] drm: Fix GETCONNECTOR regression In commit 91eefc05f0ac71902906b2058360e61bd25137fe Author: Daniel Vetter Date: Wed Dec 14 00:08:10 2016 +0100 drm: Tighten locking in drm_mode_getconnector I reordered the logic a bit in that IOCTL, but that broke userspace since it'll get the new mode list, but not the new property values. Fix that again. v2: Fix up the error path handling when copy_to_user for the modes failes (Dhinakaran). Fixes: 91eefc05f0ac ("drm: Tighten locking in drm_mode_getconnector") Cc: Sean Paul Cc: Daniel Vetter Cc: Jani Nikula Cc: David Airlie Cc: dri-devel@lists.freedesktop.org Reported-by: "H.J. Lu" Tested-by: "H.J. Lu" Cc: # v4.11+ Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100576 Cc: "H.J. 
Lu" Cc: "Pandiyan, Dhinakaran" Reviewed-by: Sean Paul Reviewed-by: Dhinakaran Pandiyan Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170620202837.1701-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_connector.c | 38 +++++++++++++++++---------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 9f847615ac74..48ca2457df8c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (!connector) return -ENOENT; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - encoder = drm_connector_get_encoder(connector); - if (encoder) - out_resp->encoder_id = encoder->base.id; - else - out_resp->encoder_id = 0; - - ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, - (uint32_t __user *)(unsigned long)(out_resp->props_ptr), - (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), - &out_resp->count_props); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - if (ret) - goto out_unref; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) if (connector->encoder_ids[i] != 0) encoders_count++; @@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (put_user(connector->encoder_ids[i], encoder_ptr + copied)) { ret = -EFAULT; - goto out_unref; + goto out; } copied++; } @@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; + mutex_unlock(&dev->mode_config.mutex); + goto out; } copied++; } } out_resp->count_modes = mode_count; -out: mutex_unlock(&dev->mode_config.mutex); -out_unref: + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; + else + out_resp->encoder_id = 0; + + /* Only grab properties after probing, to make sure EDID and other + * properties reflect the latest status. */ + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), + &out_resp->count_props); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + +out: drm_connector_put(connector); return ret; From 8a1898db51a3390241cd5fae267dc8aaa9db0f8b Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Tue, 20 Jun 2017 12:26:39 +0200 Subject: [PATCH 374/436] perf/aux: Correct return code of rb_alloc_aux() if !has_aux(ev) If the event for which an AUX area is about to be allocated, does not support setting up an AUX area, rb_alloc_aux() return -ENOTSUPP. This error condition is being returned unfiltered to the user space, and, for example, the perf tools fails with: failed to mmap with 524 (INTERNAL ERROR: strerror_r(524, 0x3fff497a1c8, 512)=22) This error can be easily seen with "perf record -m 128,256 -e cpu-clock". The 524 error code maps to -ENOTSUPP (in rb_alloc_aux()). The -ENOTSUPP error code shall be only used within the kernel. So the correct error code would then be -EOPNOTSUPP. With this commit, the perf tool then reports: failed to mmap with 95 (Operation not supported) which is more clear. 
Signed-off-by: Hendrik Brueckner Acked-by: Alexander Shishkin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Pu Hou Cc: Thomas Gleixner Cc: Thomas-Mich Richter Cc: acme@kernel.org Cc: linux-s390@vger.kernel.org Link: http://lkml.kernel.org/r/1497954399-6355-1-git-send-email-brueckner@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/events/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 2831480c63a2..ee97196bb151 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, int ret = -ENOMEM, max_order = 0; if (!has_aux(event)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { /* From 7def52b78a5fda14864aab9b6fd14f09a4d4ff72 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 19 Jun 2017 10:55:47 -0400 Subject: [PATCH 375/436] dm integrity: fix to not disable/enable interrupts from interrupt context Use spin_lock_irqsave and spin_unlock_irqrestore rather than spin_{lock,unlock}_irq in submit_flush_bio(). Otherwise lockdep issues the following warning: DEBUG_LOCKS_WARN_ON(current->hardirq_context) WARNING: CPU: 1 PID: 0 at kernel/locking/lockdep.c:2748 trace_hardirqs_on_caller+0x107/0x180 Reported-by: Ondrej Kozina Tested-by: Ondrej Kozina Signed-off-by: Mike Snitzer Acked-by: Mikulas Patocka --- drivers/md/dm-integrity.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 4ab10cf718c9..93b181088168 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic) static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) { struct bio *bio; - spin_lock_irq(&ic->endio_wait.lock); + unsigned long flags; + + spin_lock_irqsave(&ic->endio_wait.lock, flags); bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); bio_list_add(&ic->flush_bio_list, bio); - spin_unlock_irq(&ic->endio_wait.lock); + spin_unlock_irqrestore(&ic->endio_wait.lock, flags); + queue_work(ic->commit_wq, &ic->commit_work); } From feb7695fe9fb83084aa29de0094774f4c9d4c9fc Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 20 Jun 2017 19:14:30 -0400 Subject: [PATCH 376/436] dm io: fix duplicate bio completion due to missing ref count If only a subset of the devices associated with multiple regions support a given special operation (eg. DISCARD) then the dec_count() that is used to set error for the region must increment the io->count. Otherwise, when the dec_count() is called it can cause the dm-io caller's bio to be completed multiple times. As was reported against the dm-mirror target that had mirror legs with a mix of discard capabilities. 
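The rule the fix restores, as a two-line sketch mirroring the hunk below: dec_count() drops a reference on io->count, so an error path that skips a region must first take the reference it is about to drop:

        atomic_inc(&io->count);                 /* take the ref this dec_count() will drop */
        dec_count(io, region, -EOPNOTSUPP);     /* no longer completes the original bio early */
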
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=196077 Reported-by: Zhang Yi Signed-off-by: Mike Snitzer --- drivers/md/dm-io.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3702e502466d..8d5ca30f6551 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region, else if (op == REQ_OP_WRITE_SAME) special_cmd_max_sectors = q->limits.max_write_same_sectors; if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || - op == REQ_OP_WRITE_SAME) && - special_cmd_max_sectors == 0) { + op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) { + atomic_inc(&io->count); dec_count(io, region, -EOPNOTSUPP); return; } From 8e8320c9315c47a6a090188720ccff32a6a6ba18 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 20 Jun 2017 17:56:13 -0600 Subject: [PATCH 377/436] blk-mq: fix performance regression with shared tags If we have shared tags enabled, then every IO completion will trigger a full loop of every queue belonging to a tag set, and every hardware queue for each of those queues, even if nothing needs to be done. This causes a massive performance regression if you have a lot of shared devices. Instead of doing this huge full scan on every IO, add an atomic counter to the main queue that tracks how many hardware queues have been marked as needing a restart. With that, we can avoid looking for restartable queues, if we don't have to. Max reports that this restores performance. Before this patch, 4K IOPS was limited to 22-23K IOPS. With the patch, we are running at 950-970K IOPS. Fixes: 6d8c6c0f97ad ("blk-mq: Restart a single queue if tag sets are shared") Reported-by: Max Gurtovoy Tested-by: Max Gurtovoy Reviewed-by: Bart Van Assche Tested-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 58 +++++++++++++++++++++++++++++++++--------- block/blk-mq-sched.h | 9 ------- block/blk-mq.c | 16 +++++++++--- include/linux/blkdev.h | 2 ++ 4 files changed, 61 insertions(+), 24 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 1f5b692526ae..0ded5e846335 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q, __blk_mq_sched_assign_ioc(q, rq, bio, ioc); } +/* + * Mark a hardware queue as needing a restart. For shared queues, maintain + * a count of how many hardware queues are marked for restart. 
+ */ +static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) +{ + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + return; + + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { + struct request_queue *q = hctx->queue; + + if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_inc(&q->shared_hctx_restart); + } else + set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); +} + +static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + return false; + + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { + struct request_queue *q = hctx->queue; + + if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_dec(&q->shared_hctx_restart); + } else + clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + + if (blk_mq_hctx_has_pending(hctx)) { + blk_mq_run_hw_queue(hctx, true); + return true; + } + + return false; +} + struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, @@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, return true; } -static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) -{ - if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { - clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (blk_mq_hctx_has_pending(hctx)) { - blk_mq_run_hw_queue(hctx, true); - return true; - } - } - return false; -} - /** * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list * @pos: loop cursor. @@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) unsigned int i, j; if (set->flags & BLK_MQ_F_TAG_SHARED) { + /* + * If this is 0, then we know that no hardware queues + * have RESTART marked. We're done. + */ + if (!atomic_read(&queue->shared_hctx_restart)) + return; + rcu_read_lock(); list_for_each_entry_rcu_rr(q, queue, &set->tag_list, tag_set_list) { diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index edafb5383b7b..5007edece51a 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) return false; } -/* - * Mark a hardware queue as needing a restart. - */ -static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) -{ - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); -} - static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); diff --git a/block/blk-mq.c b/block/blk-mq.c index bb66c96850b1..958cedaff8b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q, } } +/* + * Caller needs to ensure that we're either frozen/quiesced, or that + * the queue isn't live yet. 
+ */ static void queue_set_hctx_shared(struct request_queue *q, bool shared) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { - if (shared) + if (shared) { + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_inc(&q->shared_hctx_restart); hctx->flags |= BLK_MQ_F_TAG_SHARED; - else + } else { + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_dec(&q->shared_hctx_restart); hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } } } -static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, + bool shared) { struct request_queue *q; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b74a3edcb3da..1ddd36bd2173 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -391,6 +391,8 @@ struct request_queue { int nr_rqs[2]; /* # allocated [a]sync rqs */ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ + atomic_t shared_hctx_restart; + struct blk_queue_stats *stats; struct rq_wb *rq_wb; From f4cb767d76cf7ee72f97dd76f6cfa6c76a5edc89 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 20 Jun 2017 02:10:44 -0700 Subject: [PATCH 378/436] mm: fix new crash in unmapped_area_topdown() Trinity gets kernel BUG at mm/mmap.c:1963! in about 3 minutes of mmap testing. That's the VM_BUG_ON(gap_end < gap_start) at the end of unmapped_area_topdown(). Linus points out how MAP_FIXED (which does not have to respect our stack guard gap intentions) could result in gap_end below gap_start there. Fix that, and the similar case in its alternative, unmapped_area(). Cc: stable@vger.kernel.org Fixes: 1be7107fbe18 ("mm: larger stack guard gap, between vmas") Reported-by: Dave Jones Debugged-by: Linus Torvalds Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- mm/mmap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 8e07976d5e47..290b77d9a01e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1817,7 +1817,8 @@ check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; - if (gap_end >= low_limit && gap_end - gap_start >= length) + if (gap_end >= low_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ @@ -1920,7 +1921,8 @@ check_current: gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; - if (gap_start <= high_limit && gap_end - gap_start >= length) + if (gap_start <= high_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ From bd726c90b6b8ce87602208701b208a208e6d5600 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 19 Jun 2017 17:34:05 +0200 Subject: [PATCH 379/436] Allow stack to grow up to address space limit Fix expand_upwards() on architectures with an upward-growing stack (parisc, metag and partly IA-64) to allow the stack to reliably grow exactly up to the address space limit given by TASK_SIZE. Signed-off-by: Helge Deller Acked-by: Hugh Dickins Signed-off-by: Linus Torvalds --- mm/mmap.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 290b77d9a01e..a5e3dcd75e79 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2230,16 +2230,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; - /* Guard against wrapping around to address 0. 
*/ + /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; - address += PAGE_SIZE; - if (!address) + if (address >= TASK_SIZE) return -ENOMEM; + address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; - if (gap_addr < address) - return -ENOMEM; + + /* Guard against overflow */ + if (gap_addr < address || gap_addr > TASK_SIZE) + gap_addr = TASK_SIZE; + next = vma->vm_next; if (next && next->vm_start < gap_addr) { if (!(next->vm_flags & VM_GROWSUP)) From e4330d8bf669139a983255d1801733b64c2ae841 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Mon, 19 Jun 2017 15:53:01 +0300 Subject: [PATCH 380/436] ACPI / scan: Fix enumeration for special SPI and I2C devices Commit f406270bf73d ("ACPI / scan: Set the visited flag for all enumerated devices") caused two groups of special SPI or I2C devices not to enumerate. SPI and I2C devices are expected to be enumerated by the SPI and I2C subsystems, but the change caused acpi_bus_attach() to mark those devices with acpi_device_set_enumerated(). The first group of devices is matched using the Device Tree compatible property with the special _HID "PRP0001". Those devices have a matching scan handler, so acpi_scan_attach_handler() returns 1 and acpi_bus_attach() marks them with acpi_device_set_enumerated(). The second group of devices, without a valid _HID such as "LNXVIDEO", has device->pnp.type.platform_id set to zero, and the change again marks them with acpi_device_set_enumerated(). Fix this by flagging the SPI and I2C devices at struct acpi_device object initialization time and letting the code in acpi_bus_attach() go through the device_attach() and acpi_default_enumeration() path for all SPI and I2C devices. Fixes: f406270bf73d (ACPI / scan: Set the visited flag for all enumerated devices) Signed-off-by: Jarkko Nikula Acked-by: Mika Westerberg Cc: 4.11+ # 4.11+ Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/scan.c | 67 +++++++++++++++++++++++------------------ include/acpi/acpi_bus.h | 3 +- 2 files changed, 39 insertions(+), 31 deletions(-) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3a10d7573477..d53162997f32 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev) adev->flags.coherent_dma = cca; } +static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) +{ + bool *is_spi_i2c_slave_p = data; + + if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) + return 1; + + /* + * devices that are connected to UART still need to be enumerated to + * platform bus + */ + if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) + *is_spi_i2c_slave_p = true; + + /* no need to do more checking */ + return -1; +} + +static bool acpi_is_spi_i2c_slave(struct acpi_device *device) +{ + struct list_head resource_list; + bool is_spi_i2c_slave = false; + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, + &is_spi_i2c_slave); + acpi_dev_free_resource_list(&resource_list); + + return is_spi_i2c_slave; +} + void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, int type, unsigned long long sta) { @@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, acpi_bus_get_flags(device); device->flags.match_driver = false; device->flags.initialized = true; + device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device); acpi_device_clear_enumerated(device); device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); @@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, return AE_OK; } -static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) -{ - bool *is_spi_i2c_slave_p = data; - - if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) - return 1; - - /* - * devices that are connected to UART still need to be enumerated to - * platform bus - */ - if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) - *is_spi_i2c_slave_p = true; - - /* no need to do more checking */ - return -1; -} - static void acpi_default_enumeration(struct acpi_device *device) { - struct list_head resource_list; - bool is_spi_i2c_slave = false; - /* * Do not enumerate SPI/I2C slaves as they will be enumerated by their * respective parents. 
*/ - INIT_LIST_HEAD(&resource_list); - acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, - &is_spi_i2c_slave); - acpi_dev_free_resource_list(&resource_list); - if (!is_spi_i2c_slave) { + if (!device->flags.spi_i2c_slave) { acpi_create_platform_device(device, NULL); acpi_device_set_enumerated(device); } else { @@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device) return; device->flags.match_driver = true; - if (ret > 0) { + if (ret > 0 && !device->flags.spi_i2c_slave) { acpi_device_set_enumerated(device); goto ok; } @@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device) if (ret < 0) return; - if (device->pnp.type.platform_id) - acpi_default_enumeration(device); - else + if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave) acpi_device_set_enumerated(device); + else + acpi_default_enumeration(device); ok: list_for_each_entry(child, &device->children, node) diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 197f3fffc9a7..408c7820e200 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -210,7 +210,8 @@ struct acpi_device_flags { u32 of_compatible_ok:1; u32 coherent_dma:1; u32 cca_seen:1; - u32 reserved:20; + u32 spi_i2c_slave:1; + u32 reserved:19; }; /* File System */ From 2f263d145140ea4b9f5762b15886ae26195a764a Mon Sep 17 00:00:00 2001 From: Richard Genoud Date: Thu, 15 Jun 2017 10:36:22 +0200 Subject: [PATCH 381/436] kbuild: fix header installation under fakechroot environment Since commit fcc8487d477a ("uapi: export all headers under uapi directories") fakechroot make bindeb-pkg fails, mismatching files for directories: touch: cannot touch 'usr/include/video/uvesafb.h/.install': Not a directory This is due to a bug in fakechroot: when using the function $(wildcard $(srcdir)/*/.) in a makefile, under a fakechroot environment, not only directories but also files are returned. To circumvent that, we are using the functions: $(sort $(dir $(wildcard $(srcdir)/*/)))) Fixes: fcc8487d477a ("uapi: export all headers under uapi directories") Signed-off-by: Richard Genoud Signed-off-by: Masahiro Yamada --- scripts/Makefile.headersinst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index ce753a408c56..c583a1e1bd3c 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst @@ -14,7 +14,15 @@ __headers: include scripts/Kbuild.include srcdir := $(srctree)/$(obj) -subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) + +# When make is run under a fakechroot environment, the function +# $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular +# files. So, we are using a combination of sort/dir/wildcard which works +# with fakechroot. +subdirs := $(patsubst $(srcdir)/%/,%,\ + $(filter-out $(srcdir)/,\ + $(sort $(dir $(wildcard $(srcdir)/*/))))) + # caller may set destination dir (when installing to asm/) _dst := $(if $(dst),$(dst),$(obj)) From eb5e248d502bec191bd99f04cae8b49992b3abde Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 21 Jun 2017 20:27:35 -0700 Subject: [PATCH 382/436] xfs: don't allow bmap on rt files bmap returns a dumb LBA address but not the block device that goes with that LBA. Swapfiles don't care about this and will blindly assume that the data volume is the correct blockdev, which is totally bogus for files on the rt subvolume. This results in the swap code doing IOs to arbitrary locations on the data device(!)
if the passed in mapping is a realtime file, so just turn off bmap for rt files. Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_aops.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 09af0f7cd55e..3b91faacc1ba 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -1316,9 +1316,12 @@ xfs_vm_bmap( * The swap code (ab-)uses ->bmap to get a block mapping and then * bypasseѕ the file system for actual I/O. We really can't allow * that on reflinks inodes, so we have to skip out here. And yes, - * 0 is the magic code for a bmap error.. + * 0 is the magic code for a bmap error. + * + * Since we don't pass back blockdev info, we can't return bmap + * information for rt files either. */ - if (xfs_is_reflink_inode(ip)) + if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip)) return 0; filemap_write_and_wait(mapping); From 6c782a5ea56a799658e213a78dc1455264938afa Mon Sep 17 00:00:00 2001 From: Michail Georgios Etairidis Date: Tue, 20 Jun 2017 10:20:42 +0200 Subject: [PATCH 383/436] i2c: imx: Use correct function to write to register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The i2c-imx driver incorrectly uses readb()/writeb() to read and write to the appropriate registers when performing a repeated start. The appropriate imx_i2c_read_reg()/imx_i2c_write_reg() functions should be used instead. Performing a repeated start results in a kernel panic. The platform is imx. Signed-off-by: Michail G Etairidis Fixes: ce1a78840ff7 ("i2c: imx: add DMA support for freescale i2c driver") Fixes: 054b62d9f25c ("i2c: imx: fix the i2c bus hang issue when do repeat restart") Acked-by: Fugang Duan Acked-by: Uwe Kleine-König Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-imx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 95ed17183e73..54a47b40546f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, * the first read operation, otherwise the first read cost * one extra clock cycle. */ - temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); @@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo * the first read operation, otherwise the first read cost * one extra clock cycle. */ - temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev, From fb3a5055cd7098f8d1dd0cd38d7172211113255f Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 19 Jun 2017 07:26:09 -0700 Subject: [PATCH 384/436] perf/x86/intel: Add 1G DTLB load/store miss support for SKL Current DTLB load/store miss events (0x608/0x649) only count 4K, 2M and 4M page sizes. Need to extend the events to support any page size (4K/2M/4M/1G).
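Decoding the raw values (low byte = event select 0x08/0x49, next byte = umask; the sub-event meaning of the umask bits is inferred here, not stated in the patch):

        /* 0x608 / 0x649: umask 0x06 -> walks completed for 4K and 2M/4M pages only */
        /* 0xe08 / 0xe49: umask 0x0e -> additionally counts 1G page walks           */
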
The complete DTLB load/store miss events are: DTLB_LOAD_MISSES.WALK_COMPLETED 0xe08 DTLB_STORE_MISSES.WALK_COMPLETED 0xe49 Signed-off-by: Kan Liang Cc: Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: eranian@google.com Link: http://lkml.kernel.org/r/20170619142609.11058-1-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a6d91d4e37a1..110ce8238466 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids [ C(DTLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ - [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ + [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ - [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ + [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0x0, From addb63c18a0d52a9ce2611d039f981f7b6148d2b Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 19 Jun 2017 08:02:28 +0200 Subject: [PATCH 385/436] KVM: s390: gaccess: fix real-space designation asce handling for gmap shadows For real-space designation asces the asce origin part is only a token. The asce token origin must not be used to generate an effective address for storage references. This however is erroneously done within kvm_s390_shadow_tables(). Furthermore within the same function the wrong parts of virtual addresses are used to generate a corresponding real address (e.g. the region second index is used as region first index). Both of the above can result in incorrect address translations. Only for real space designations with a token origin of zero and addresses below one megabyte the translation was correct. Furthermore replace a "!asce.r" statement with a "!*fake" statement to make it more obvious that a specific condition has nothing to do with the architecture, but with the fake handling of real space designations. 
Fixes: 3218f7094b6b ("s390/mm: support real-space for gmap shadows") Cc: David Hildenbrand Cc: stable@vger.kernel.org Signed-off-by: Heiko Carstens Reviewed-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 9da243d94cc3..3b297fa3aa67 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, ptr = asce.origin * 4096; if (asce.r) { *fake = 1; + ptr = 0; asce.dt = ASCE_TYPE_REGION1; } switch (asce.dt) { case ASCE_TYPE_REGION1: - if (vaddr.rfx01 > asce.tl && !asce.r) + if (vaddr.rfx01 > asce.tl && !*fake) return PGM_REGION_FIRST_TRANS; break; case ASCE_TYPE_REGION2: @@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, union region1_table_entry rfte; if (*fake) { - /* offset in 16EB guest memory block */ - ptr = ptr + ((unsigned long) vaddr.rsx << 53UL); + ptr += (unsigned long) vaddr.rfx << 53; rfte.val = ptr; goto shadow_r2t; } @@ -1036,8 +1036,7 @@ shadow_r2t: union region2_table_entry rste; if (*fake) { - /* offset in 8PB guest memory block */ - ptr = ptr + ((unsigned long) vaddr.rtx << 42UL); + ptr += (unsigned long) vaddr.rsx << 42; rste.val = ptr; goto shadow_r3t; } @@ -1064,8 +1063,7 @@ shadow_r3t: union region3_table_entry rtte; if (*fake) { - /* offset in 4TB guest memory block */ - ptr = ptr + ((unsigned long) vaddr.sx << 31UL); + ptr += (unsigned long) vaddr.rtx << 31; rtte.val = ptr; goto shadow_sgt; } @@ -1101,8 +1099,7 @@ shadow_sgt: union segment_table_entry ste; if (*fake) { - /* offset in 2G guest memory block */ - ptr = ptr + ((unsigned long) vaddr.sx << 20UL); + ptr += (unsigned long) vaddr.sx << 20; ste.val = ptr; goto shadow_pgt; } From 111237415393acdef9b4f700c747a2e92c9be62e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:09:54 +0200 Subject: [PATCH 386/436] iommu/amd: Disable IOMMUs at boot if they are enabled When booting, make sure the IOMMUs are disabled. They could be previously enabled if we boot into a kexec or kdump kernel. So make sure they are off. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index cf7896550e75..f123fa5a7adb 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2318,6 +2318,9 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; + /* Disable any previously enabled IOMMUs */ + disable_iommus(); + if (amd_iommu_irq_remap) amd_iommu_irq_remap = check_ioapic_information(); From 90b3eb03e1b2b2ece299c485ae9da0cd221e700d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:09:55 +0200 Subject: [PATCH 387/436] iommu/amd: Rename free_on_init_error() The function will also be used to free iommu resources when amd_iommu=off was specified on the kernel command line. So rename the function to reflect that. 
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index f123fa5a7adb..74f4eea6be77 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2108,7 +2108,7 @@ static struct syscore_ops amd_iommu_syscore_ops = { .resume = amd_iommu_resume, }; -static void __init free_on_init_error(void) +static void __init free_iommu_resources(void) { kmemleak_free(irq_lookup_table); free_pages((unsigned long)irq_lookup_table, @@ -2536,7 +2536,7 @@ static int __init amd_iommu_init(void) free_dma_resources(); if (!irq_remapping_enabled) { disable_iommus(); - free_on_init_error(); + free_iommu_resources(); } else { struct amd_iommu *iommu; From 1b1e942e344d3d41de187c1275024e915cb15844 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:09:56 +0200 Subject: [PATCH 388/436] iommu/amd: Add new init-state IOMMU_CMDLINE_DISABLED This will be used when during initialization we detect that the iommu should be disabled. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 74f4eea6be77..df9ec85271f5 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -237,6 +237,7 @@ enum iommu_init_state { IOMMU_INITIALIZED, IOMMU_NOT_FOUND, IOMMU_INIT_ERROR, + IOMMU_CMDLINE_DISABLED, }; /* Early ioapic and hpet maps from kernel command line */ @@ -2452,6 +2453,7 @@ static int __init state_next(void) break; case IOMMU_NOT_FOUND: case IOMMU_INIT_ERROR: + case IOMMU_CMDLINE_DISABLED: /* Error states => do nothing */ ret = -EINVAL; break; @@ -2469,8 +2471,9 @@ static int __init iommu_go_to_state(enum iommu_init_state state) while (init_state != state) { ret = state_next(); - if (init_state == IOMMU_NOT_FOUND || - init_state == IOMMU_INIT_ERROR) + if (init_state == IOMMU_NOT_FOUND || + init_state == IOMMU_INIT_ERROR || + init_state == IOMMU_CMDLINE_DISABLED) break; } From 151b09031a76ba6b6b83f94953074d6f10aa30b3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:09:57 +0200 Subject: [PATCH 389/436] iommu/amd: Check for error states first in iommu_go_to_state() Check if we are in an error state already before calling into state_next(). Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index df9ec85271f5..a6b81a05a0d1 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2467,14 +2467,14 @@ static int __init state_next(void) static int __init iommu_go_to_state(enum iommu_init_state state) { - int ret = 0; + int ret = -EINVAL; while (init_state != state) { - ret = state_next(); if (init_state == IOMMU_NOT_FOUND || init_state == IOMMU_INIT_ERROR || init_state == IOMMU_CMDLINE_DISABLED) break; + ret = state_next(); } return ret; From f601927136d69be49f0a14ae820b44c02fa591ba Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:09:58 +0200 Subject: [PATCH 390/436] iommu/amd: Set global pointers to NULL after freeing them Avoid any tries to double-free these pointers. 
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index a6b81a05a0d1..8cc507f96f3a 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2114,18 +2114,22 @@ static void __init free_iommu_resources(void) kmemleak_free(irq_lookup_table); free_pages((unsigned long)irq_lookup_table, get_order(rlookup_table_size)); + irq_lookup_table = NULL; kmem_cache_destroy(amd_iommu_irq_cache); amd_iommu_irq_cache = NULL; free_pages((unsigned long)amd_iommu_rlookup_table, get_order(rlookup_table_size)); + amd_iommu_rlookup_table = NULL; free_pages((unsigned long)amd_iommu_alias_table, get_order(alias_table_size)); + amd_iommu_alias_table = NULL; free_pages((unsigned long)amd_iommu_dev_table, get_order(dev_table_size)); + amd_iommu_dev_table = NULL; free_iommu_all(); @@ -2195,6 +2199,7 @@ static void __init free_dma_resources(void) { free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, get_order(MAX_DOMAIN_ID/8)); + amd_iommu_pd_alloc_bitmap = NULL; free_unity_maps(); } From 7ad820e43330bbf974792c5ab4e2c2355e08d597 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:09:59 +0200 Subject: [PATCH 391/436] iommu/amd: Free IOMMU resources when disabled on command line After we made sure that all IOMMUs have been disabled we need to make sure that all resources we allocated are released again. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 8cc507f96f3a..128f9665c326 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2430,6 +2430,13 @@ static int __init state_next(void) case IOMMU_IVRS_DETECTED: ret = early_amd_iommu_init(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; + if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { + pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n"); + free_dma_resources(); + free_iommu_resources(); + init_state = IOMMU_CMDLINE_DISABLED; + ret = -EINVAL; + } break; case IOMMU_ACPI_FINISHED: early_enable_iommus(); From ffa080ebb5405b27b0b84a3a75c9cdf4ed3d28da Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 16 Jun 2017 16:10:00 +0200 Subject: [PATCH 392/436] iommu/amd: Remove amd_iommu_disabled check from amd_iommu_detect() This check needs to happens later now, when all previously enabled IOMMUs have been disabled. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 128f9665c326..5cc597b383c7 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2578,9 +2578,6 @@ int __init amd_iommu_detect(void) if (no_iommu || (iommu_detected && !gart_iommu_aperture)) return -ENODEV; - if (amd_iommu_disabled) - return -ENODEV; - ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); if (ret) return ret; From 9ce3a72cd7f7e0b9ba1c5952e4461b363824bca9 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 22 Jun 2017 12:16:33 +0200 Subject: [PATCH 393/436] iommu/amd: Free already flushed ring-buffer entries before full-check To benefit from IOTLB flushes on other CPUs we have to free the already flushed IOVAs from the ring-buffer before we do the queue_ring_full() check. 
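Roughly, the queue_add() path becomes (a sketch using the helper names from the driver; the full hunk is below):

	/* Reclaim entries already covered by a flush done elsewhere */
	queue_ring_free_flushed(dom, queue);

	/* Only pay for our own IOTLB flush if the ring is still full */
	if (queue_ring_full(queue)) {
		dma_ops_domain_flush_tlb(dom);
		queue_ring_free_flushed(dom, queue);
	}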
Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 95ee360a5199..c618c26a3b33 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1910,15 +1910,21 @@ static void queue_add(struct dma_ops_domain *dom, queue = get_cpu_ptr(dom->flush_queue); spin_lock_irqsave(&queue->lock, flags); + /* + * First remove the enries from the ring-buffer that are already + * flushed to make the below queue_ring_full() check less likely + */ + queue_ring_free_flushed(dom, queue); + /* * When ring-queue is full, flush the entries from the IOTLB so * that we can free all entries with queue_ring_free_flushed() * below. */ - if (queue_ring_full(queue)) + if (queue_ring_full(queue)) { dma_ops_domain_flush_tlb(dom); - - queue_ring_free_flushed(dom, queue); + queue_ring_free_flushed(dom, queue); + } idx = queue_ring_add(queue); From bbd5ff50afffcf4a01d05367524736c57607a478 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Tue, 20 Jun 2017 18:37:28 +1000 Subject: [PATCH 394/436] powerpc/powernv/npu-dma: Add explicit flush when sending an ATSD NPU2 requires an extra explicit flush to an active GPU PID when sending address translation shoot downs (ATSDs) to reliably flush the GPU TLB. This patch adds just such a flush at the end of each sequence of ATSDs. We can safely use PID 0 which is always reserved and active on the GPU. PID 0 is only used for init_mm which will never be a user mm on the GPU. To enforce this we add a check in pnv_npu2_init_context() just in case someone tries to use PID 0 on the GPU. Signed-off-by: Alistair Popple [mpe: Use true/false for bool literals] Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/npu-dma.c | 94 ++++++++++++++++-------- 1 file changed, 65 insertions(+), 29 deletions(-) diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index e6f444b46207..b5d960d6db3d 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch, return mmio_atsd_reg; } -static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) +static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) { unsigned long launch; @@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) /* PID */ launch |= pid << PPC_BITLSHIFT(38); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); + /* Invalidating the entire process doesn't use a va */ return mmio_launch_invalidate(npu, launch, 0); } static int mmio_invalidate_va(struct npu *npu, unsigned long va, - unsigned long pid) + unsigned long pid, bool flush) { unsigned long launch; @@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va, /* PID */ launch |= pid << PPC_BITLSHIFT(38); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); + return mmio_launch_invalidate(npu, launch, va); } #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) +struct mmio_atsd_reg { + struct npu *npu; + int reg; +}; + +static void mmio_invalidate_wait( + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) +{ + struct npu *npu; + int i, reg; + + /* Wait for all invalidations to complete */ + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* Wait for completion */ + npu = 
mmio_atsd_reg[i].npu; + reg = mmio_atsd_reg[i].reg; + while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) + cpu_relax(); + + put_mmio_atsd_reg(npu, reg); + + /* + * The GPU requires two flush ATSDs to ensure all entries have + * been flushed. We use PID 0 as it will never be used for a + * process on the GPU. + */ + if (flush) + mmio_invalidate_pid(npu, 0, true); + } +} + /* * Invalidate either a single address or an entire PID depending on * the value of va. */ static void mmio_invalidate(struct npu_context *npu_context, int va, - unsigned long address) + unsigned long address, bool flush) { - int i, j, reg; + int i, j; struct npu *npu; struct pnv_phb *nphb; struct pci_dev *npdev; - struct { - struct npu *npu; - int reg; - } mmio_atsd_reg[NV_MAX_NPUS]; + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; unsigned long pid = npu_context->mm->context.id; /* @@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, if (va) mmio_atsd_reg[i].reg = - mmio_invalidate_va(npu, address, pid); + mmio_invalidate_va(npu, address, pid, + flush); else mmio_atsd_reg[i].reg = - mmio_invalidate_pid(npu, pid); + mmio_invalidate_pid(npu, pid, flush); /* * The NPU hardware forwards the shootdown to all GPUs @@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, */ flush_tlb_mm(npu_context->mm); - /* Wait for all invalidations to complete */ - for (i = 0; i <= max_npu2_index; i++) { - if (mmio_atsd_reg[i].reg < 0) - continue; - - /* Wait for completion */ - npu = mmio_atsd_reg[i].npu; - reg = mmio_atsd_reg[i].reg; - while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) - cpu_relax(); - put_mmio_atsd_reg(npu, reg); - } + mmio_invalidate_wait(mmio_atsd_reg, flush); + if (flush) + /* Wait for the flush to complete */ + mmio_invalidate_wait(mmio_atsd_reg, false); } static void pnv_npu2_mn_release(struct mmu_notifier *mn, @@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn, * There should be no more translation requests for this PID, but we * need to ensure any entries for it are removed from the TLB. 
*/ - mmio_invalidate(npu_context, 0, 0); + mmio_invalidate(npu_context, 0, 0, true); } static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, @@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, { struct npu_context *npu_context = mn_to_npu_context(mn); - mmio_invalidate(npu_context, 1, address); + mmio_invalidate(npu_context, 1, address, true); } static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, @@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, { struct npu_context *npu_context = mn_to_npu_context(mn); - mmio_invalidate(npu_context, 1, address); + mmio_invalidate(npu_context, 1, address, true); } static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, @@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, struct npu_context *npu_context = mn_to_npu_context(mn); unsigned long address; - for (address = start; address <= end; address += PAGE_SIZE) - mmio_invalidate(npu_context, 1, address); + for (address = start; address < end; address += PAGE_SIZE) + mmio_invalidate(npu_context, 1, address, false); + + /* Do the flush only on the final addess == end */ + mmio_invalidate(npu_context, 1, address, true); } static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { @@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, /* No nvlink associated with this GPU device */ return ERR_PTR(-ENODEV); - if (!mm) { - /* kernel thread contexts are not supported */ + if (!mm || mm->context.id == 0) { + /* + * Kernel thread contexts are not supported and context id 0 is + * reserved on the GPU. + */ return ERR_PTR(-EINVAL); } From c8401dda2f0a00cd25c0af6a95ed50e478d25de4 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 7 Jun 2017 15:13:14 +0200 Subject: [PATCH 395/436] KVM: x86: fix singlestepping over syscall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TF is handled a bit differently for syscall and sysret, compared to the other instructions: TF is checked after the instruction completes, so that the OS can disable #DB at a syscall by adding TF to FMASK. When the sysret is executed the #DB is taken "as if" the syscall insn just completed. KVM emulates syscall so that it can trap 32-bit syscall on Intel processors. Fix the behavior, otherwise you could get #DB on a user stack which is not nice. This does not affect Linux guests, as they use an IST or task gate for #DB. This fixes CVE-2017-7518. 
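For reference, the guest-side convention being emulated is that SYSCALL clears every RFLAGS bit set in IA32_FMASK, so a kernel that does not want #DB inside its entry path masks TF there - something along these lines (a sketch, not taken from this patch):

	/* boot-time MSR setup: mask TF (and friends) across SYSCALL */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_IF);

With TF masked that way no trap fires inside the entry path; it is only delivered once SYSRET restores the saved flags, which is why the emulator now samples TF after the emulated instruction has updated the flags, via the new ctxt->tf field.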
Cc: stable@vger.kernel.org Reported-by: Andy Lutomirski Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_emulate.h | 1 + arch/x86/kvm/emulate.c | 1 + arch/x86/kvm/x86.c | 62 +++++++++++++++--------------- 3 files changed, 34 insertions(+), 30 deletions(-) diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 055962615779..722d0e568863 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -296,6 +296,7 @@ struct x86_emulate_ctxt { bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ + bool tf; /* TF value before instruction (after for syscall/sysret) */ bool have_exception; struct x86_exception exception; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 0816ab2e8adc..80890dee66ce 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); } + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; return X86EMUL_CONTINUE; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 87d3cb901935..0e846f0cb83b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); ctxt->eflags = kvm_get_rflags(vcpu); + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; + ctxt->eip = kvm_rip_read(vcpu); ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : @@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, return dr6; } -static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) +static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; - /* - * rflags is the old, "raw" value of the flags. The new value has - * not been saved yet. - * - * This is correct even for TF set by the guest, because "the - * processor will not generate this exception after the instruction - * that sets the TF flag". - */ - if (unlikely(rflags & X86_EFLAGS_TF)) { - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | - DR6_RTM; - kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; - kvm_run->debug.arch.exception = DB_VECTOR; - kvm_run->exit_reason = KVM_EXIT_DEBUG; - *r = EMULATE_USER_EXIT; - } else { - /* - * "Certain debug exceptions may clear bit 0-3. The - * remaining contents of the DR6 register are never - * cleared by the processor". - */ - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= DR6_BS | DR6_RTM; - kvm_queue_exception(vcpu, DB_VECTOR); - } + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { + kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + *r = EMULATE_USER_EXIT; + } else { + /* + * "Certain debug exceptions may clear bit 0-3. The + * remaining contents of the DR6 register are never + * cleared by the processor". 
+ */ + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= DR6_BS | DR6_RTM; + kvm_queue_exception(vcpu, DB_VECTOR); } } @@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) int r = EMULATE_DONE; kvm_x86_ops->skip_emulated_instruction(vcpu); - kvm_vcpu_check_singlestep(vcpu, rflags, &r); + + /* + * rflags is the old, "raw" value of the flags. The new value has + * not been saved yet. + * + * This is correct even for TF set by the guest, because "the + * processor will not generate this exception after the instruction + * that sets the TF flag". + */ + if (unlikely(rflags & X86_EFLAGS_TF)) + kvm_vcpu_do_singlestep(vcpu, &r); return r == EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); @@ -5726,8 +5727,9 @@ restart: toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE) - kvm_vcpu_check_singlestep(vcpu, rflags, &r); + if (r == EMULATE_DONE && + (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) __kvm_set_rflags(vcpu, ctxt->eflags); From 7598f8bc1383ffd77686cb4e92e749bef3c75937 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Wed, 21 Jun 2017 18:41:34 +0200 Subject: [PATCH 396/436] perf probe: Fix probe definition for inlined functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 613f050d68a8 ("perf probe: Fix to probe on gcc generated functions in modules"), the offset from symbol is, incorrectly, added to the trace point address. This leads to incorrect probe trace points for inlined functions and when using relative line number on symbols. 
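Put as a formula (using the field names from the hunk at the end of this patch), the trace point address was being computed as

	tp->address + tp->offset - offs

where it should simply be

	tp->address - offs

i.e. the offset from the symbol ended up being counted twice.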
Prior this patch: $ perf probe -m nf_nat -D in_range p:probe/in_range nf_nat:in_range.isra.9+0 $ perf probe -m i40e -D i40e_clean_rx_irq p:probe/i40e_clean_rx_irq i40e:i40e_napi_poll+2212 $ perf probe -m i40e -D i40e_clean_rx_irq:16 p:probe/i40e_clean_rx_irq i40e:i40e_lan_xmit_frame+626 After: $ perf probe -m nf_nat -D in_range p:probe/in_range nf_nat:in_range.isra.9+0 $ perf probe -m i40e -D i40e_clean_rx_irq p:probe/i40e_clean_rx_irq i40e:i40e_napi_poll+1106 $ perf probe -m i40e -D i40e_clean_rx_irq:16 p:probe/i40e_clean_rx_irq i40e:i40e_napi_poll+2665 Committer testing: Using 'pfunct', a tool found in the 'dwarves' package [1], one can ask what are the functions that while not being explicitely marked as inline, were inlined by the compiler: # pfunct --cc_inlined /lib/modules/4.12.0-rc4+/kernel/drivers/net/ethernet/intel/e1000e/e1000e.ko | head __ew32 e1000_regdump e1000e_dump_ps_pages e1000_desc_unused e1000e_systim_to_hwtstamp e1000e_rx_hwtstamp e1000e_update_rdt_wa e1000e_update_tdt_wa e1000_put_txbuf e1000_consume_page Then ask 'perf probe' to produce the kprobe_tracer probe definitions for two of them: # perf probe -m e1000e -D e1000e_rx_hwtstamp p:probe/e1000e_rx_hwtstamp e1000e:e1000_receive_skb+74 # perf probe -m e1000e -D e1000_consume_page p:probe/e1000_consume_page e1000e:e1000_clean_jumbo_rx_irq+876 p:probe/e1000_consume_page_1 e1000e:e1000_clean_jumbo_rx_irq+1506 p:probe/e1000_consume_page_2 e1000e:e1000_clean_rx_irq_ps+1074 Now lets concentrate on the 'e1000_consume_page' one, that was inlined twice in e1000_clean_jumbo_rx_irq(), lets see what readelf says about the DWARF tags for that function: $ readelf -wi /lib/modules/4.12.0-rc4+/kernel/drivers/net/ethernet/intel/e1000e/e1000e.ko <1><13e27b>: Abbrev Number: 121 (DW_TAG_subprogram) <13e27c> DW_AT_name : (indirect string, offset: 0xa8945): e1000_clean_jumbo_rx_irq <13e287> DW_AT_low_pc : 0x17a30 <3><13e6ef>: Abbrev Number: 119 (DW_TAG_inlined_subroutine) <13e6f0> DW_AT_abstract_origin: <0x13ed2c> <13e6f4> DW_AT_low_pc : 0x17be6 <1><13ed2c>: Abbrev Number: 142 (DW_TAG_subprogram) <13ed2e> DW_AT_name : (indirect string, offset: 0xa54c3): e1000_consume_page So, the first time in e1000_clean_jumbo_rx_irq() where e1000_consume_page() is inlined is at PC 0x17be6, which subtracted from e1000_clean_jumbo_rx_irq()'s address, gives us the offset we should use in the probe definition: 0x17be6 - 0x17a30 = 438 but above we have 876, which is twice as much. Lets see the second inline expansion of e1000_consume_page() in e1000_clean_jumbo_rx_irq(): <3><13e86e>: Abbrev Number: 119 (DW_TAG_inlined_subroutine) <13e86f> DW_AT_abstract_origin: <0x13ed2c> <13e873> DW_AT_low_pc : 0x17d21 0x17d21 - 0x17a30 = 753 So we where adding it at twice the offset from the containing function as we should. 
And then after this patch: # perf probe -m e1000e -D e1000e_rx_hwtstamp p:probe/e1000e_rx_hwtstamp e1000e:e1000_receive_skb+37 # perf probe -m e1000e -D e1000_consume_page p:probe/e1000_consume_page e1000e:e1000_clean_jumbo_rx_irq+438 p:probe/e1000_consume_page_1 e1000e:e1000_clean_jumbo_rx_irq+753 p:probe/e1000_consume_page_2 e1000e:e1000_clean_jumbo_rx_irq+1353 # Which matches the two first expansions and shows that because we were doubling the offset it would spill over the next function: readelf -sw /lib/modules/4.12.0-rc4+/kernel/drivers/net/ethernet/intel/e1000e/e1000e.ko 673: 0000000000017a30 1626 FUNC LOCAL DEFAULT 2 e1000_clean_jumbo_rx_irq 674: 0000000000018090 2013 FUNC LOCAL DEFAULT 2 e1000_clean_rx_irq_ps This is the 3rd inline expansion of e1000_consume_page() in e1000_clean_jumbo_rx_irq(): <3><13ec77>: Abbrev Number: 119 (DW_TAG_inlined_subroutine) <13ec78> DW_AT_abstract_origin: <0x13ed2c> <13ec7c> DW_AT_low_pc : 0x17f79 0x17f79 - 0x17a30 = 1353 So: 0x17a30 + 2 * 1353 = 0x184c2 And: 0x184c2 - 0x18090 = 1074 Which explains the bogus third expansion for e1000_consume_page() to end up at: p:probe/e1000_consume_page_2 e1000e:e1000_clean_rx_irq_ps+1074 All fixed now :-) [1] https://git.kernel.org/pub/scm/devel/pahole/pahole.git/ Signed-off-by: Björn Töpel Tested-by: Arnaldo Carvalho de Melo Acked-by: Magnus Karlsson Acked-by: Masami Hiramatsu Cc: stable@vger.kernel.org Fixes: 613f050d68a8 ("perf probe: Fix to probe on gcc generated functions in modules") Link: http://lkml.kernel.org/r/20170621164134.5701-1-bjorn.topel@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 84e7e698411e..a2670e9d652d 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp, struct map *map, unsigned long offs) { struct symbol *sym; - u64 addr = tp->address + tp->offset - offs; + u64 addr = tp->address - offs; sym = map__find_symbol(map, addr); if (!sym) From ad8181060788c80c0ad75b583f24c22fa962a7a6 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 22 May 2017 18:44:57 -0700 Subject: [PATCH 397/436] kconfig: fix sparse warnings in nconfig Fix sparse warnings in scripts/kconfig/nconf* ('make nconfig'): ../scripts/kconfig/nconf.c:1071:32: warning: Using plain integer as NULL pointer ../scripts/kconfig/nconf.c:1238:30: warning: Using plain integer as NULL pointer ../scripts/kconfig/nconf.c:511:51: warning: Using plain integer as NULL pointer ../scripts/kconfig/nconf.c:1460:6: warning: symbol 'setup_windows' was not declared. Should it be static? ../scripts/kconfig/nconf.c:274:12: warning: symbol 'current_instructions' was not declared. Should it be static? ../scripts/kconfig/nconf.c:308:22: warning: symbol 'function_keys' was not declared. Should it be static? ../scripts/kconfig/nconf.gui.c:132:17: warning: non-ANSI function declaration of function 'set_colors' ../scripts/kconfig/nconf.gui.c:195:24: warning: Using plain integer as NULL pointer nconf.gui.o before/after files are the same. nconf.o before/after files are the same until the 'static' function declarations are added. 
Signed-off-by: Randy Dunlap Signed-off-by: Masahiro Yamada --- scripts/kconfig/nconf.c | 12 ++++++------ scripts/kconfig/nconf.gui.c | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index a9bc5334a478..003114779815 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS]; static int items_num; static int global_exit; /* the currently selected button */ -const char *current_instructions = menu_instructions; +static const char *current_instructions = menu_instructions; static char *dialog_input_result; static int dialog_input_result_len; @@ -305,7 +305,7 @@ struct function_keys { }; static const int function_keys_num = 9; -struct function_keys function_keys[] = { +static struct function_keys function_keys[] = { { .key_str = "F1", .func = "Help", @@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag) index = (index + items_num) % items_num; while (true) { char *str = k_menu_items[index].str; - if (strcasestr(str, match_str) != 0) + if (strcasestr(str, match_str) != NULL) return index; if (flag == FIND_NEXT_MATCH_UP || flag == MATCH_TINKER_PATTERN_UP) @@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans) static void conf(struct menu *menu) { - struct menu *submenu = 0; + struct menu *submenu = NULL; const char *prompt = menu_get_prompt(menu); struct symbol *sym; int res; @@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu) static void conf_choice(struct menu *menu) { const char *prompt = _(menu_get_prompt(menu)); - struct menu *child = 0; + struct menu *child = NULL; struct symbol *active; int selected_index = 0; int last_top_row = 0; @@ -1456,7 +1456,7 @@ static void conf_save(void) } } -void setup_windows(void) +static void setup_windows(void) { int lines, columns; diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 4b2f44c20caf..a64b1c31253e 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c @@ -129,7 +129,7 @@ static void no_colors_theme(void) mkattrn(FUNCTION_TEXT, A_REVERSE); } -void set_colors() +void set_colors(void) { start_color(); use_default_colors(); @@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no) int lines = 0; if (!text) - return 0; + return NULL; for (i = 0; text[i] != '\0' && lines < line_no; i++) if (text[i] == '\n') From 34f19ff1b5a0d11e46df479623d6936460105c9f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Jun 2017 15:58:29 +1000 Subject: [PATCH 398/436] powerpc/64: Initialise thread_info for emergency stacks Emergency stacks have their thread_info mostly uninitialised, which in particular means garbage preempt_count values. Emergency stack code runs with interrupts disabled entirely, and is used very rarely, so this has been unnoticed so far. It was found by a proposed new powerpc watchdog that takes a soft-NMI directly from the masked_interrupt handler and using the emergency stack. That crashed at BUG_ON(in_nmi()) in nmi_enter(). preempt_count()s were found to be garbage. To fix this, zero the entire THREAD_SIZE allocation, and initialize the thread_info. Cc: stable@vger.kernel.org Reported-by: Abdul Haleem Signed-off-by: Nicholas Piggin [mpe: Move it all into setup_64.c, use a function not a macro. 
Fix crashes on Cell by setting preempt_count to 0 not HARDIRQ_OFFSET] Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_64.c | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a8c1f99e9607..4640f6d64f8b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void) } #endif +/* + * Emergency stacks are used for a range of things, from asynchronous + * NMIs (system reset, machine check) to synchronous, process context. + * We set preempt_count to zero, even though that isn't necessarily correct. To + * get the right value we'd need to copy it from the previous thread_info, but + * doing that might fault causing more problems. + * TODO: what to do with accounting? + */ +static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu) +{ + ti->task = NULL; + ti->cpu = cpu; + ti->preempt_count = 0; + ti->local_flags = 0; + ti->flags = 0; + klp_init_thread_info(ti); +} + /* * Stack space used when we detect a bad kernel stack pointer, and * early in SMP boots before relocation is enabled. Exclusive emergency @@ -633,24 +651,31 @@ void __init emergency_stack_init(void) * Since we use these as temporary stacks during secondary CPU * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. + * + * The IRQ stacks allocated elsewhere in this file are zeroed and + * initialized in kernel/irq.c. These are initialized here in order + * to have emergency stacks available as early as possible. */ limit = min(safe_stack_limit(), ppc64_rma_size); for_each_possible_cpu(i) { struct thread_info *ti; ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); - klp_init_thread_info(ti); + memset(ti, 0, THREAD_SIZE); + emerg_stack_init_thread_info(ti, i); paca[i].emergency_sp = (void *)ti + THREAD_SIZE; #ifdef CONFIG_PPC_BOOK3S_64 /* emergency stack for NMI exception handling. */ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); - klp_init_thread_info(ti); + memset(ti, 0, THREAD_SIZE); + emerg_stack_init_thread_info(ti, i); paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE; /* emergency stack for machine check exception handling. */ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); - klp_init_thread_info(ti); + memset(ti, 0, THREAD_SIZE); + emerg_stack_init_thread_info(ti, i); paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE; #endif } From 9768935264c4f0e4afd788a185d8e8d89c28e41d Mon Sep 17 00:00:00 2001 From: Andrew Duggan Date: Fri, 23 Jun 2017 00:04:51 -0700 Subject: [PATCH 399/436] Input: synaptics-rmi4 - only read the F54 query registers which are used The F54 driver is currently only using the first 6 bytes of F54 so there is no need to read all 27 bytes. Some Dell systems (Dell XP13 9333 and similar) have an issue with the touchpad or I2C bus when reading reports larger then 16 bytes. Reads larger then 16 bytes are reported in two HID reports. Something about the back to back reports seems to cause the next read to report incorrect data. This results in F30 failing to load and the click button failing to work. 
Previous issues with the I2C controller or touchpad were addressed in: commit 5b65c2a02966 ("HID: rmi: check sanity of the incoming report") Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=195949 Signed-off-by: Andrew Duggan Reviewed-by: Benjamin Tissoires Reviewed-by: Nick Dyer Signed-off-by: Dmitry Torokhov --- drivers/input/rmi4/rmi_f54.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index dea63e2db3e6..f5206e2c767e 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -31,9 +31,6 @@ #define F54_GET_REPORT 1 #define F54_FORCE_CAL 2 -/* Fixed sizes of reports */ -#define F54_QUERY_LEN 27 - /* F54 capabilities */ #define F54_CAP_BASELINE (1 << 2) #define F54_CAP_IMAGE8 (1 << 3) @@ -95,7 +92,6 @@ struct rmi_f54_reports { struct f54_data { struct rmi_function *fn; - u8 qry[F54_QUERY_LEN]; u8 num_rx_electrodes; u8 num_tx_electrodes; u8 capabilities; @@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn) { int error; struct f54_data *f54; + u8 buf[6]; f54 = dev_get_drvdata(&fn->dev); error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, - &f54->qry, sizeof(f54->qry)); + buf, sizeof(buf)); if (error) { dev_err(&fn->dev, "%s: Failed to query F54 properties\n", __func__); return error; } - f54->num_rx_electrodes = f54->qry[0]; - f54->num_tx_electrodes = f54->qry[1]; - f54->capabilities = f54->qry[2]; - f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8); - f54->family = f54->qry[5]; + f54->num_rx_electrodes = buf[0]; + f54->num_tx_electrodes = buf[1]; + f54->capabilities = buf[2]; + f54->clock_rate = buf[3] | (buf[4] << 8); + f54->family = buf[5]; rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n", f54->num_rx_electrodes); From b847de4e5087fc8577c38a697d14fd2a5ce93352 Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Fri, 5 May 2017 16:47:46 +0530 Subject: [PATCH 400/436] iommu/arm-smmu-v3: Increase CMDQ drain timeout value Waiting for a CMD_SYNC to be processed involves waiting for the command queue to drain, which can take an awful lot longer than waiting for a single entry to become available. Consequently, the common timeout value of 100us has been observed to be too short on some platforms when a CMD_SYNC is issued into a queued full of TLBI commands. This patch resolves the issue by using a different (1s) timeout when waiting for the CMDQ to drain and using a simple back-off mechanism when polling the cons pointer in the absence of WFE support. Signed-off-by: Sunil Goutham [will: rewrote commit message and cosmetic changes] Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 380969aa60d5..6a06be7626db 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -408,6 +408,7 @@ /* High-level queue structures */ #define ARM_SMMU_POLL_TIMEOUT_US 100 +#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US 1000000 /* 1s! */ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -737,7 +738,13 @@ static void queue_inc_prod(struct arm_smmu_queue *q) */ static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe) { - ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); + ktime_t timeout; + unsigned int delay = 1; + + /* Wait longer if it's queue drain */ + timeout = ktime_add_us(ktime_get(), drain ? 
+ ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US : + ARM_SMMU_POLL_TIMEOUT_US); while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) { if (ktime_compare(ktime_get(), timeout) > 0) @@ -747,7 +754,8 @@ static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe) wfe(); } else { cpu_relax(); - udelay(1); + udelay(delay); + delay *= 2; } } From 60ab7a75c8d83049b0e6189b4128247513880b19 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 13 Jun 2017 15:58:30 +0530 Subject: [PATCH 401/436] iommu/io-pgtable-arm-v7s: constify dummy_tlb_ops. File size before: text data bss dec hex filename 6146 56 9 6211 1843 drivers/iommu/io-pgtable-arm-v7s.o File size After adding 'const': text data bss dec hex filename 6170 24 9 6203 183b drivers/iommu/io-pgtable-arm-v7s.o Signed-off-by: Arvind Yadav Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8d6ca28c3e1f..f8869951610c 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -749,7 +749,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static struct iommu_gather_ops dummy_tlb_ops = { +static const struct iommu_gather_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, From 84c24379a783c514e5ff7c8fc8a21cf8d64fd05f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 19 Jun 2017 16:41:56 +0100 Subject: [PATCH 402/436] iommu/arm-smmu: Plumb in new ACPI identifiers Revision C of IORT now allows us to identify ARM MMU-401 and the Cavium ThunderX implementation. Wire them up so that we can probe these models once firmware starts using the new codes in place of generic ones, and so that the appropriate features and quirks get enabled when we do. For the sake of backports and mitigating sychronisation problems with the ACPICA headers, we'll carry a backup copy of the new definitions locally for the short term to make life simpler. CC: stable@vger.kernel.org # 4.10 Acked-by: Robert Richter Tested-by: Robert Richter Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7ec30b08b3bd..7ecd1a0b8419 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -312,6 +312,14 @@ enum arm_smmu_implementation { CAVIUM_SMMUV2, }; +/* Until ACPICA headers cover IORT rev. 
C */ +#ifndef ACPI_IORT_SMMU_CORELINK_MMU401 +#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4 +#endif +#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX +#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5 +#endif + struct arm_smmu_s2cr { struct iommu_group *group; int count; @@ -2073,6 +2081,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) smmu->version = ARM_SMMU_V1; smmu->model = GENERIC_SMMU; break; + case ACPI_IORT_SMMU_CORELINK_MMU401: + smmu->version = ARM_SMMU_V1_64K; + smmu->model = GENERIC_SMMU; + break; case ACPI_IORT_SMMU_V2: smmu->version = ARM_SMMU_V2; smmu->model = GENERIC_SMMU; @@ -2081,6 +2093,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) smmu->version = ARM_SMMU_V2; smmu->model = ARM_MMU500; break; + case ACPI_IORT_SMMU_CAVIUM_THUNDERX: + smmu->version = ARM_SMMU_V2; + smmu->model = CAVIUM_SMMUV2; + break; default: ret = -ENODEV; } From ebdd13c93f8e878afcdba642f48cd1bd85619e2a Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Thu, 22 Jun 2017 12:51:00 +0530 Subject: [PATCH 403/436] iommu: arm-smmu-v3: make of_device_ids const of_device_ids are not supposed to change at runtime. All functions working with of_device_ids provided by work with const of_device_ids. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 6a06be7626db..0fd09745822f 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2776,7 +2776,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) return 0; } -static struct of_device_id arm_smmu_of_match[] = { +static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,smmu-v3", }, { }, }; From 5c2d0218290afa3c335f38583bf4f8e8adad4c76 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Thu, 22 Jun 2017 12:57:42 +0530 Subject: [PATCH 404/436] iommu: arm-smmu: Handle return of iommu_device_register. iommu_device_register returns an error code and, although it currently never fails, we should check its return value anyway. Signed-off-by: Arvind Yadav [will: adjusted to follow arm-smmu.c] Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 0fd09745822f..029fe0cffee7 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2744,6 +2744,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); ret = iommu_device_register(&smmu->iommu); + if (ret) { + dev_err(dev, "Failed to register iommu\n"); + return ret; + } #ifdef CONFIG_PCI if (pci_bus_type.iommu_ops != &arm_smmu_ops) { From 9db829d2818501f07583542c05d01513b9161e14 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:50 +0100 Subject: [PATCH 405/436] iommu/io-pgtable-arm-v7s: Check table PTEs more precisely Whilst we don't support the PXN bit at all, so should never encounter a level 1 section or supersection PTE with it set, it would still be wise to check both table type bits to resolve any theoretical ambiguity. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index f8869951610c..46da7aa7c7d0 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -92,7 +92,8 @@ #define ARM_V7S_PTE_TYPE_CONT_PAGE 0x1 #define ARM_V7S_PTE_IS_VALID(pte) (((pte) & 0x3) != 0) -#define ARM_V7S_PTE_IS_TABLE(pte, lvl) (lvl == 1 && ((pte) & ARM_V7S_PTE_TYPE_TABLE)) +#define ARM_V7S_PTE_IS_TABLE(pte, lvl) \ + ((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE)) /* Page table bits */ #define ARM_V7S_ATTR_XN(lvl) BIT(4 * (2 - (lvl))) From fb3a95795da53d05a4fc5fcdc0d3ec69e7163355 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:51 +0100 Subject: [PATCH 406/436] iommu/io-pgtable-arm: Improve split_blk_unmap The current split_blk_unmap implementation suffers from some inscrutable pointer trickery for creating the tables to replace the block entry, but more than that it also suffers from hideous inefficiency. For example, the most pathological case of unmapping a level 3 page from a level 1 block will allocate 513 lower-level tables to remap the entire block at page granularity, when only 2 are actually needed (the rest can be covered by level 2 block entries). Also, we would like to be able to relax the spinlock requirement in future, for which the roll-back-and-try-again logic for race resolution would be pretty hideous under the current paradigm. Both issues can be resolved most neatly by turning things sideways: instead of repeatedly recursing into __arm_lpae_map() map to build up an entire new sub-table depth-first, we can directly replace the block entry with a next-level table of block/page entries, then repeat by unmapping at the next level if necessary. With a little refactoring of some helper functions, the code ends up not much bigger than before, but considerably easier to follow and to adapt in future. 
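(To spell out the 513-versus-2 numbers above for the 4K granule case, where each table holds 512 entries: remapping an entire level 1 block at page granularity costs one level 2 table plus 512 level 3 tables = 513 allocations, whereas punching a single page out of it only needs one level 2 table - 511 of whose entries remain level 2 blocks - plus one level 3 table covering the 2MB region that contains the hole, i.e. 2 allocations.)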
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 118 +++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 49 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 6e5df5e0a3bd..dd7477010291 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -264,19 +264,38 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); +static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + phys_addr_t paddr, arm_lpae_iopte prot, + int lvl, arm_lpae_iopte *ptep) +{ + arm_lpae_iopte pte = prot; + + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NS; + + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + pte |= ARM_LPAE_PTE_TYPE_PAGE; + else + pte |= ARM_LPAE_PTE_TYPE_BLOCK; + + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + + __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); +} + static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) { - arm_lpae_iopte pte = prot; - struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte = *ptep; - if (iopte_leaf(*ptep, lvl)) { + if (iopte_leaf(pte, lvl)) { /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; - } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { + } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { /* * We need to unmap and free the old table before * overwriting it with a block entry. @@ -289,21 +308,24 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, return -EINVAL; } - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NS; - - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - pte |= ARM_LPAE_PTE_TYPE_PAGE; - else - pte |= ARM_LPAE_PTE_TYPE_BLOCK; - - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; - pte |= pfn_to_iopte(paddr >> data->pg_shift, data); - - __arm_lpae_set_pte(ptep, pte, cfg); + __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); return 0; } +static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, + arm_lpae_iopte *ptep, + struct io_pgtable_cfg *cfg) +{ + arm_lpae_iopte new; + + new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) + new |= ARM_LPAE_PTE_NSTABLE; + + __arm_lpae_set_pte(ptep, new, cfg); + return new; +} + static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) @@ -331,10 +353,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NSTABLE; - __arm_lpae_set_pte(ptep, pte, cfg); + arm_lpae_install_table(cptep, ptep, cfg); } else if (!iopte_leaf(pte, lvl)) { cptep = iopte_deref(pte, data); } else { @@ -452,40 +471,43 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, - arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep, size_t blk_size) + arm_lpae_iopte blk_pte, int lvl, + arm_lpae_iopte *ptep) { - unsigned long blk_start, blk_end; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte, *tablep; phys_addr_t blk_paddr; - arm_lpae_iopte table = 0; + 
size_t tablesz = ARM_LPAE_GRANULE(data); + size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + int i, unmap_idx = -1; - blk_start = iova & ~(blk_size - 1); - blk_end = blk_start + blk_size; - blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; + if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) + return 0; - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_lpae_iopte *tablep; + tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); + if (!tablep) + return 0; /* Bytes unmapped */ + if (size == split_sz) + unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); + + blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift; + pte = iopte_prot(blk_pte); + + for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) { /* Unmap! */ - if (blk_start == iova) + if (i == unmap_idx) continue; - /* __arm_lpae_map expects a pointer to the start of the table */ - tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); - if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - tablep = iopte_deref(table, data); - __arm_lpae_free_pgtable(data, lvl + 1, tablep); - } - return 0; /* Bytes unmapped */ - } + __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); } - __arm_lpae_set_pte(ptep, table, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + arm_lpae_install_table(tablep, ptep, cfg); + + if (unmap_idx < 0) + return __arm_lpae_unmap(data, iova, size, lvl, tablep); + + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; } @@ -495,7 +517,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte; struct io_pgtable *iop = &data->iop; - size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); /* Something went horribly wrong and we ran out of page table */ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) @@ -507,7 +528,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, return 0; /* If the size matches this level, we're in the right place */ - if (size == blk_size) { + if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { __arm_lpae_set_pte(ptep, 0, &iop->cfg); if (!iopte_leaf(pte, lvl)) { @@ -527,9 +548,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_lpae_split_blk_unmap(data, iova, size, - iopte_prot(pte), lvl, ptep, - blk_size); + return arm_lpae_split_blk_unmap(data, iova, size, pte, + lvl + 1, ptep); } /* Keep on walkin' */ From b9f1ef30ac2e9942b8628d551f4a21e8cec1415c Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:52 +0100 Subject: [PATCH 407/436] iommu/io-pgtable-arm-v7s: Refactor split_blk_unmap Whilst the short-descriptor format's split_blk_unmap implementation has no need to be recursive, it followed the pattern of the LPAE version anyway for the sake of consistency. With the latter now reworked for both efficiency and future scalability improvements, tweak the former similarly, not least to make it less obtuse. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 85 ++++++++++++++++-------------- 1 file changed, 45 insertions(+), 40 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 46da7aa7c7d0..dc74631322e4 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -281,6 +281,13 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, else if (prot & IOMMU_CACHE) pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C; + pte |= ARM_V7S_PTE_TYPE_PAGE; + if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) + pte |= ARM_V7S_ATTR_NS_SECTION; + + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) + pte |= ARM_V7S_ATTR_MTK_4GB; + return pte; } @@ -353,7 +360,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, int lvl, int num_entries, arm_v7s_iopte *ptep) { struct io_pgtable_cfg *cfg = &data->iop.cfg; - arm_v7s_iopte pte = arm_v7s_prot_to_pte(prot, lvl, cfg); + arm_v7s_iopte pte; int i; for (i = 0; i < num_entries; i++) @@ -375,13 +382,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, return -EEXIST; } - pte |= ARM_V7S_PTE_TYPE_PAGE; - if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) - pte |= ARM_V7S_ATTR_NS_SECTION; - - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) - pte |= ARM_V7S_ATTR_MTK_4GB; - + pte = arm_v7s_prot_to_pte(prot, lvl, cfg); if (num_entries > 1) pte = arm_v7s_pte_to_cont(pte, lvl); @@ -391,6 +392,20 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, return 0; } +static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, + arm_v7s_iopte *ptep, + struct io_pgtable_cfg *cfg) +{ + arm_v7s_iopte new; + + new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE; + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) + new |= ARM_V7S_ATTR_NS_TABLE; + + __arm_v7s_set_pte(ptep, new, 1, cfg); + return new; +} + static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, int prot, int lvl, arm_v7s_iopte *ptep) @@ -418,11 +433,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = virt_to_phys(cptep) | ARM_V7S_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_V7S_ATTR_NS_TABLE; - - __arm_v7s_set_pte(ptep, pte, 1, cfg); + arm_v7s_install_table(cptep, ptep, cfg); } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { cptep = iopte_deref(pte, lvl); } else { @@ -503,41 +514,35 @@ static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, unsigned long iova, size_t size, - arm_v7s_iopte *ptep) + arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep) { - unsigned long blk_start, blk_end, blk_size; - phys_addr_t blk_paddr; - arm_v7s_iopte table = 0; - int prot = arm_v7s_pte_to_prot(*ptep, 1); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_v7s_iopte pte, *tablep; + int i, unmap_idx, num_entries, num_ptes; - blk_size = ARM_V7S_BLOCK_SIZE(1); - blk_start = iova & ARM_V7S_LVL_MASK(1); - blk_end = blk_start + ARM_V7S_BLOCK_SIZE(1); - blk_paddr = *ptep & ARM_V7S_LVL_MASK(1); + tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data); + if (!tablep) + return 0; /* Bytes unmapped */ - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_v7s_iopte *tablep; + num_ptes = ARM_V7S_PTES_PER_LVL(2); + num_entries = size >> ARM_V7S_LVL_SHIFT(2); + unmap_idx = ARM_V7S_LVL_IDX(iova, 2); + pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, 
cfg); + if (num_entries > 1) + pte = arm_v7s_pte_to_cont(pte, 2); + + for (i = 0; i < num_ptes; i += num_entries, pte += size) { /* Unmap! */ - if (blk_start == iova) + if (i == unmap_idx) continue; - /* __arm_v7s_map expects a pointer to the start of the table */ - tablep = &table - ARM_V7S_LVL_IDX(blk_start, 1); - if (__arm_v7s_map(data, blk_start, blk_paddr, size, prot, 1, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - tablep = iopte_deref(table, 1); - __arm_v7s_free_table(tablep, 2, data); - } - return 0; /* Bytes unmapped */ - } + __arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg); } - __arm_v7s_set_pte(ptep, table, 1, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + arm_v7s_install_table(tablep, ptep, cfg); + + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; } @@ -594,7 +599,7 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_v7s_split_blk_unmap(data, iova, size, ptep); + return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep); } /* Keep on walkin' */ From 81b3c25218447c65f93adf08b099a322b6803536 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:53 +0100 Subject: [PATCH 408/436] iommu/io-pgtable: Introduce explicit coherency Once we remove the serialising spinlock, a potential race opens up for non-coherent IOMMUs whereby a caller of .map() can be sure that cache maintenance has been performed on their new PTE, but will have no guarantee that such maintenance for table entries above it has actually completed (e.g. if another CPU took an interrupt immediately after writing the table entry, but before initiating the DMA sync). Handling this race safely will add some potentially non-trivial overhead to installing a table entry, which we would much rather avoid on coherent systems where it will be unnecessary, and where we are striving to minimise latency by removing the locking in the first place. To that end, let's introduce an explicit notion of cache-coherency to io-pgtable, such that we will be able to avoid penalising IOMMUs which know enough to know when they are coherent.
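As a usage illustration (not part of the patch itself), a caller that knows its table walker snoops the CPU caches would opt out of the DMA maintenance roughly as follows; my_tlb_ops, my_iommu_is_coherent(), dev and cookie are assumed context rather than real API:

        struct io_pgtable_ops *ops;
        struct io_pgtable_cfg cfg = {
                .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
                .ias            = 48,
                .oas            = 48,
                .tlb            = &my_tlb_ops,      /* placeholder callbacks */
                .iommu_dev      = dev,
        };

        /* Only claim the quirk when the walker is known to be coherent */
        if (my_iommu_is_coherent(dev))              /* e.g. firmware-described */
                cfg.quirks |= IO_PGTABLE_QUIRK_NO_DMA;

        ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);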
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 3 +++ drivers/iommu/arm-smmu.c | 3 +++ drivers/iommu/io-pgtable-arm-v7s.c | 17 ++++++++++------- drivers/iommu/io-pgtable-arm.c | 11 ++++++----- drivers/iommu/io-pgtable.h | 6 ++++++ 5 files changed, 28 insertions(+), 12 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 029fe0cffee7..d50c8d4b9af9 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1563,6 +1563,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .iommu_dev = smmu->dev, }; + if (smmu->features & ARM_SMMU_FEAT_COHERENCY) + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) return -ENOMEM; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7ecd1a0b8419..2bc5df8a490f 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1018,6 +1018,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .iommu_dev = smmu->dev, }; + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + smmu_domain->smmu = smmu; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index dc74631322e4..ec024c75a09e 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -187,7 +187,8 @@ static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, struct arm_v7s_io_pgtable *data) { - struct device *dev = data->iop.cfg.iommu_dev; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct device *dev = cfg->iommu_dev; dma_addr_t dma; size_t size = ARM_V7S_TABLE_SIZE(lvl); void *table = NULL; @@ -196,7 +197,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); else if (lvl == 2) table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); - if (table && !selftest_running) { + if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; @@ -225,10 +226,11 @@ out_free: static void __arm_v7s_free_table(void *table, int lvl, struct arm_v7s_io_pgtable *data) { - struct device *dev = data->iop.cfg.iommu_dev; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct device *dev = cfg->iommu_dev; size_t size = ARM_V7S_TABLE_SIZE(lvl); - if (!selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) dma_unmap_single(dev, __arm_v7s_dma_addr(table), size, DMA_TO_DEVICE); if (lvl == 1) @@ -240,7 +242,7 @@ static void __arm_v7s_free_table(void *table, int lvl, static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { - if (selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) return; dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), @@ -657,7 +659,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_TLBI_ON_MAP | - IO_PGTABLE_QUIRK_ARM_MTK_4GB)) + IO_PGTABLE_QUIRK_ARM_MTK_4GB | + IO_PGTABLE_QUIRK_NO_DMA)) return NULL; /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. 
*/ @@ -774,7 +777,7 @@ static int __init arm_v7s_do_selftests(void) .tlb = &dummy_tlb_ops, .oas = 32, .ias = 32, - .quirks = IO_PGTABLE_QUIRK_ARM_NS, + .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, }; unsigned int iova, size, iova_start; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index dd7477010291..6334f51912ea 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -217,7 +217,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, if (!pages) return NULL; - if (!selftest_running) { + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; @@ -243,7 +243,7 @@ out_free: static void __arm_lpae_free_pages(void *pages, size_t size, struct io_pgtable_cfg *cfg) { - if (!selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); free_pages_exact(pages, size); @@ -254,7 +254,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, { *ptep = pte; - if (!selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), sizeof(pte), DMA_TO_DEVICE); @@ -693,7 +693,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) u64 reg; struct arm_lpae_io_pgtable *data; - if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -782,7 +782,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) struct arm_lpae_io_pgtable *data; /* The NS quirk doesn't apply at stage 2 */ - if (cfg->quirks) + if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -1086,6 +1086,7 @@ static int __init arm_lpae_do_selftests(void) struct io_pgtable_cfg cfg = { .tlb = &dummy_tlb_ops, .oas = 48, + .quirks = IO_PGTABLE_QUIRK_NO_DMA, }; for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 969d82cc92ca..524263a7ae6f 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -65,11 +65,17 @@ struct io_pgtable_cfg { * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit * when the SoC is in "4GB mode" and they can only access the high * remap of DRAM (0x1_00000000 to 0x1_ffffffff). + * + * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever + * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a + * software-emulated IOMMU), such that pagetable updates need not + * be treated as explicit DMA data. */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) + #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; From 2c3d273eabe8b1ed3b3cffe2c79643b1bf7e2d4a Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:54 +0100 Subject: [PATCH 409/436] iommu/io-pgtable-arm: Support lockless operation For parallel I/O with multiple concurrent threads servicing the same device (or devices, if several share a domain), serialising page table updates becomes a massive bottleneck. 
On reflection, though, we don't strictly need to do that - for valid IOMMU API usage, there are in fact only two races that we need to guard against: multiple map requests for different blocks within the same region, when the intermediate-level table for that region does not yet exist; and multiple unmaps of different parts of the same block entry. Both of those are fairly easily solved by using a cmpxchg to install the new table, such that if we then find that someone else's table got there first, we can simply free ours and continue. Make the requisite changes such that we can withstand being called without the caller maintaining a lock. In theory, this opens up a few corners in which wildly misbehaving callers making nonsensical overlapping requests might lead to crashes instead of just unpredictable results, but correct code really does not deserve to pay a significant performance cost for the sake of masking bugs in theoretical broken code. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 72 +++++++++++++++++++++++++++------- 1 file changed, 57 insertions(+), 15 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 6334f51912ea..52700fa958c2 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -20,6 +20,7 @@ #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt +#include #include #include #include @@ -99,6 +100,8 @@ #define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ ARM_LPAE_PTE_ATTR_HI_MASK) +/* Software bit for solving coherency races */ +#define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55) /* Stage-1 PTE */ #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) @@ -249,15 +252,20 @@ static void __arm_lpae_free_pages(void *pages, size_t size, free_pages_exact(pages, size); } +static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, + struct io_pgtable_cfg *cfg) +{ + dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), + sizeof(*ptep), DMA_TO_DEVICE); +} + static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, struct io_pgtable_cfg *cfg) { *ptep = pte; if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) - dma_sync_single_for_device(cfg->iommu_dev, - __arm_lpae_dma_addr(ptep), - sizeof(pte), DMA_TO_DEVICE); + __arm_lpae_sync_pte(ptep, cfg); } static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, @@ -314,16 +322,30 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, arm_lpae_iopte *ptep, + arm_lpae_iopte curr, struct io_pgtable_cfg *cfg) { - arm_lpae_iopte new; + arm_lpae_iopte old, new; new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; - __arm_lpae_set_pte(ptep, new, cfg); - return new; + /* Ensure the table itself is visible before its PTE can be */ + wmb(); + + old = cmpxchg64_relaxed(ptep, curr, new); + + if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) || + (old & ARM_LPAE_PTE_SW_SYNC)) + return old; + + /* Even if it's not ours, there's no point waiting; just kick it */ + __arm_lpae_sync_pte(ptep, cfg); + if (old == curr) + WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC); + + return old; } static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, @@ -332,6 +354,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, { arm_lpae_iopte *cptep, pte; size_t block_size = 
ARM_LPAE_BLOCK_SIZE(lvl, data); + size_t tblsz = ARM_LPAE_GRANULE(data); struct io_pgtable_cfg *cfg = &data->iop.cfg; /* Find our entry at the current level */ @@ -346,17 +369,23 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, return -EINVAL; /* Grab a pointer to the next level */ - pte = *ptep; + pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data), - GFP_ATOMIC, cfg); + cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); if (!cptep) return -ENOMEM; - arm_lpae_install_table(cptep, ptep, cfg); - } else if (!iopte_leaf(pte, lvl)) { + pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + if (pte) + __arm_lpae_free_pages(cptep, tblsz, cfg); + } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) && + !(pte & ARM_LPAE_PTE_SW_SYNC)) { + __arm_lpae_sync_pte(ptep, cfg); + } + + if (pte && !iopte_leaf(pte, lvl)) { cptep = iopte_deref(pte, data); - } else { + } else if (pte) { /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; @@ -502,7 +531,19 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); } - arm_lpae_install_table(tablep, ptep, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + if (pte != blk_pte) { + __arm_lpae_free_pages(tablep, tablesz, cfg); + /* + * We may race against someone unmapping another part of this + * block, but anything else is invalid. We can't misinterpret + * a page entry here since we're never at the last level. + */ + if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE) + return 0; + + tablep = iopte_deref(pte, data); + } if (unmap_idx < 0) return __arm_lpae_unmap(data, iova, size, lvl, tablep); @@ -523,7 +564,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, return 0; ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); - pte = *ptep; + pte = READ_ONCE(*ptep); if (WARN_ON(!pte)) return 0; @@ -585,7 +626,8 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, return 0; /* Grab the IOPTE we're interested in */ - pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + pte = READ_ONCE(*ptep); /* Valid entry? */ if (!pte) From 119ff305b02793dc31eb1e921b85925ce10dc0a1 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:55 +0100 Subject: [PATCH 410/436] iommu/io-pgtable-arm-v7s: Support lockless operation Mirroring the LPAE implementation, rework the v7s code to be robust against concurrent operations. The same two potential races exist, and are solved in the same manner, with the fixed 2-level structure making life ever so slightly simpler. What complicates matters compared to LPAE, however, is large page entries, since we can't update a block of 16 PTEs atomically, nor assume available software bits to do clever things with. As most users are never likely to do partial unmaps anyway (due to DMA API rules), it doesn't seem unreasonable for this case to remain behind a serialising lock; we just pull said lock down into the bowels of the implementation so it's well out of the way of the normal call paths. 
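Schematically, the lockless installation that this and the previous patch rely on is the following idiom (a simplified sketch, not the literal driver code; alloc_table(), free_table(), table_pte() and table_va() are placeholders):

static u64 *get_or_install_table(u64 *ptep)
{
        u64 old, *new = alloc_table(GFP_ATOMIC);    /* placeholder allocator */

        if (!new)
                return NULL;

        /* Atomically install; LPAE uses cmpxchg64, v7s a plain cmpxchg */
        old = cmpxchg64_relaxed(ptep, 0, table_pte(new));
        if (old) {
                /* Lost the race: free ours and walk the winner's table */
                free_table(new);
                return table_va(old);               /* placeholder deref */
        }
        return new;
}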
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 84 ++++++++++++++++++++++-------- 1 file changed, 63 insertions(+), 21 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index ec024c75a09e..690629457565 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -32,6 +32,7 @@ #define pr_fmt(fmt) "arm-v7s io-pgtable: " fmt +#include #include #include #include @@ -39,6 +40,7 @@ #include #include #include +#include #include #include @@ -168,6 +170,7 @@ struct arm_v7s_io_pgtable { arm_v7s_iopte *pgd; struct kmem_cache *l2_tables; + spinlock_t split_lock; }; static dma_addr_t __arm_v7s_dma_addr(void *pages) @@ -396,16 +399,22 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, arm_v7s_iopte *ptep, + arm_v7s_iopte curr, struct io_pgtable_cfg *cfg) { - arm_v7s_iopte new; + arm_v7s_iopte old, new; new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_V7S_ATTR_NS_TABLE; - __arm_v7s_set_pte(ptep, new, 1, cfg); - return new; + /* Ensure the table itself is visible before its PTE can be */ + wmb(); + + old = cmpxchg_relaxed(ptep, curr, new); + __arm_v7s_pte_sync(ptep, 1, cfg); + + return old; } static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, @@ -429,16 +438,23 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, return -EINVAL; /* Grab a pointer to the next level */ - pte = *ptep; + pte = READ_ONCE(*ptep); if (!pte) { cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data); if (!cptep) return -ENOMEM; - arm_v7s_install_table(cptep, ptep, cfg); - } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { - cptep = iopte_deref(pte, lvl); + pte = arm_v7s_install_table(cptep, ptep, 0, cfg); + if (pte) + __arm_v7s_free_table(cptep, lvl + 1, data); } else { + /* We've no easy way of knowing if it's synced yet, so... 
*/ + __arm_v7s_pte_sync(ptep, 1, cfg); + } + + if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { + cptep = iopte_deref(pte, lvl); + } else if (pte) { /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; @@ -491,27 +507,31 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop) kfree(data); } -static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, - unsigned long iova, int idx, int lvl, - arm_v7s_iopte *ptep) +static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, + unsigned long iova, int idx, int lvl, + arm_v7s_iopte *ptep) { struct io_pgtable *iop = &data->iop; arm_v7s_iopte pte; size_t size = ARM_V7S_BLOCK_SIZE(lvl); int i; + /* Check that we didn't lose a race to get the lock */ + pte = *ptep; + if (!arm_v7s_pte_is_cont(pte, lvl)) + return pte; + ptep -= idx & (ARM_V7S_CONT_PAGES - 1); - pte = arm_v7s_cont_to_pte(*ptep, lvl); - for (i = 0; i < ARM_V7S_CONT_PAGES; i++) { - ptep[i] = pte; - pte += size; - } + pte = arm_v7s_cont_to_pte(pte, lvl); + for (i = 0; i < ARM_V7S_CONT_PAGES; i++) + ptep[i] = pte + i * size; __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); size *= ARM_V7S_CONT_PAGES; io_pgtable_tlb_add_flush(iop, iova, size, size, true); io_pgtable_tlb_sync(iop); + return pte; } static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, @@ -542,7 +562,16 @@ static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, __arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg); } - arm_v7s_install_table(tablep, ptep, cfg); + pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg); + if (pte != blk_pte) { + __arm_v7s_free_table(tablep, 2, data); + + if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) + return 0; + + tablep = iopte_deref(pte, 1); + return __arm_v7s_unmap(data, iova, size, 2, tablep); + } io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; @@ -563,17 +592,28 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, idx = ARM_V7S_LVL_IDX(iova, lvl); ptep += idx; do { - if (WARN_ON(!ARM_V7S_PTE_IS_VALID(ptep[i]))) + pte[i] = READ_ONCE(ptep[i]); + if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i]))) return 0; - pte[i] = ptep[i]; } while (++i < num_entries); /* * If we've hit a contiguous 'large page' entry at this level, it * needs splitting first, unless we're unmapping the whole lot. + * + * For splitting, we can't rewrite 16 PTEs atomically, and since we + * can't necessarily assume TEX remap we don't have a software bit to + * mark live entries being split. In practice (i.e. DMA API code), we + * will never be splitting large pages anyway, so just wrap this edge + * case in a lock for the sake of correctness and be done with it. 
*/ - if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) - arm_v7s_split_cont(data, iova, idx, lvl, ptep); + if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) { + unsigned long flags; + + spin_lock_irqsave(&data->split_lock, flags); + pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep); + spin_unlock_irqrestore(&data->split_lock, flags); + } /* If the size matches this level, we're in the right place */ if (num_entries) { @@ -631,7 +671,8 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, u32 mask; do { - pte = ptep[ARM_V7S_LVL_IDX(iova, ++lvl)]; + ptep += ARM_V7S_LVL_IDX(iova, ++lvl); + pte = READ_ONCE(*ptep); ptep = iopte_deref(pte, lvl); } while (ARM_V7S_PTE_IS_TABLE(pte, lvl)); @@ -672,6 +713,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (!data) return NULL; + spin_lock_init(&data->split_lock); data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", ARM_V7S_TABLE_SIZE(2), ARM_V7S_TABLE_SIZE(2), From 523d7423e21bfe78452a640878d1de189ac45d91 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:56 +0100 Subject: [PATCH 411/436] iommu/arm-smmu: Remove io-pgtable spinlock With the io-pgtable code now robust against (valid) races, we no longer need to serialise all operations with a lock. This might make broken callers who issue concurrent operations on overlapping addresses go even more wrong than before, but hey, they already had little hope of useful or deterministic results. We do however still have to keep a lock around to serialise the ATS1* translation ops, as parallel iova_to_phys() calls could lead to unpredictable hardware behaviour otherwise. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 45 +++++++++++++--------------------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 2bc5df8a490f..bc89b4d6c043 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -433,10 +433,10 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - spinlock_t pgtbl_lock; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; struct mutex init_mutex; /* Protects smmu pointer */ + spinlock_t cb_lock; /* Serialises ATS1* ops */ struct iommu_domain domain; }; @@ -1113,7 +1113,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) } mutex_init(&smmu_domain->init_mutex); - spin_lock_init(&smmu_domain->pgtbl_lock); + spin_lock_init(&smmu_domain->cb_lock); return &smmu_domain->domain; } @@ -1391,35 +1391,23 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - int ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return -ENODEV; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->map(ops, iova, paddr, size, prot); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { - size_t ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= 
smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->unmap(ops, iova, size); } static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, @@ -1433,10 +1421,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, void __iomem *cb_base; u32 tmp; u64 phys; - unsigned long va; + unsigned long va, flags; cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); + spin_lock_irqsave(&smmu_domain->cb_lock, flags); /* ATS1 registers can only be written atomically */ va = iova & ~0xfffUL; if (smmu->version == ARM_SMMU_V2) @@ -1446,6 +1435,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) { + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); dev_err(dev, "iova to phys timed out on %pad. Falling back to software table walk.\n", &iova); @@ -1453,6 +1443,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, } phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR); + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); if (phys & CB_PAR_F) { dev_err(dev, "translation fault!\n"); dev_err(dev, "PAR = 0x%llx\n", phys); @@ -1465,10 +1456,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - phys_addr_t ret; - unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; if (domain->type == IOMMU_DOMAIN_IDENTITY) return iova; @@ -1476,17 +1465,11 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && - smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - ret = arm_smmu_iova_to_phys_hard(domain, iova); - } else { - ret = ops->iova_to_phys(ops, iova); - } + smmu_domain->stage == ARM_SMMU_DOMAIN_S1) + return arm_smmu_iova_to_phys_hard(domain, iova); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - - return ret; + return ops->iova_to_phys(ops, iova); } static bool arm_smmu_capable(enum iommu_cap cap) From 58188afeb727e0f73706f1460707bd3ba6ccc221 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 22 Jun 2017 16:53:57 +0100 Subject: [PATCH 412/436] iommu/arm-smmu-v3: Remove io-pgtable spinlock As for SMMUv2, take advantage of io-pgtable's newfound tolerance for concurrency. Unfortunately in this case the command queue lock remains a point of serialisation for the unmap path, but there may be a little more we can do to ameliorate that in future. 
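The caller-visible effect, as an illustration only (the addresses and the WARN_ON usage below are arbitrary examples): operations on disjoint ranges of the same domain may now run concurrently, while overlapping concurrent operations remain the caller's bug.

static void example_parallel_ops(struct iommu_domain *domain,
                                 phys_addr_t paddr)
{
        /* e.g. on CPU0 */
        WARN_ON(iommu_map(domain, 0x10000000, paddr, SZ_2M,
                          IOMMU_READ | IOMMU_WRITE));
        /* e.g. on CPU1, concurrently, against a disjoint IOVA range */
        iommu_unmap(domain, 0x20000000, SZ_4K);
}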
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index d50c8d4b9af9..884b1a49a52a 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -646,7 +646,6 @@ struct arm_smmu_domain { struct mutex init_mutex; /* Protects smmu pointer */ struct io_pgtable_ops *pgtbl_ops; - spinlock_t pgtbl_lock; enum arm_smmu_domain_stage stage; union { @@ -1414,7 +1413,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) } mutex_init(&smmu_domain->init_mutex); - spin_lock_init(&smmu_domain->pgtbl_lock); return &smmu_domain->domain; } @@ -1686,44 +1684,29 @@ out_unlock: static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - int ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return -ENODEV; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->map(ops, iova, paddr, size, prot); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { - size_t ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->unmap(ops, iova, size); } static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - phys_addr_t ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (domain->type == IOMMU_DOMAIN_IDENTITY) return iova; @@ -1731,11 +1714,7 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->iova_to_phys(ops, iova); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - - return ret; + return ops->iova_to_phys(ops, iova); } static struct platform_driver arm_smmu_driver; From c1004803b40596c1aabbbc78a6b1b33e4dfd96c6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 23 Jun 2017 11:45:57 +0100 Subject: [PATCH 413/436] iommu/io-pgtable: depend on !GENERIC_ATOMIC64 when using COMPILE_TEST with LPAE The LPAE/ARMv8 page table format relies on the ability to read and write 64-bit page table entries in an atomic fashion. With the move to a lockless implementation, we also need support for cmpxchg64 to resolve races when installing table entries concurrently. Unfortunately, not all architectures support cmpxchg64, so the code can fail to compile when building for these architectures using COMPILE_TEST. Rather than disable COMPILE_TEST altogether, instead check that GENERIC_ATOMIC64 is not selected, which is a reasonable indication that the architecture has support for 64-bit cmpxchg.
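The connection to the lockless rework, as a sketch (not the driver's exact code): the install path needs a native 64-bit compare-and-swap, which GENERIC_ATOMIC64 architectures either emulate with spinlocks or do not expose as cmpxchg64() at all.

static u64 try_install(u64 *ptep, u64 old, u64 new)
{
        /* Requires a real 64-bit CAS instruction to be race-free */
        return cmpxchg64_relaxed(ptep, old, new);
}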
Reported-by: kbuild test robot Signed-off-by: Will Deacon --- drivers/iommu/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6ee3a25ae731..c88cfa7522b2 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE - depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) + depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)) help Enable support for the ARM long descriptor pagetable format. This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page From 77f3445866c39d8866b31d8d9fa47c7c20938e05 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 23 Jun 2017 12:02:38 +0100 Subject: [PATCH 414/436] iommu/io-pgtable-arm: Use dma_wmb() instead of wmb() when publishing table When writing a new table entry, we must ensure that the contents of the table is made visible to the SMMU page table walker before the updated table entry itself. This is currently achieved using wmb(), which expands to an expensive and unnecessary DSB instruction. Ideally, we'd just use cmpxchg64_release when writing the table entry, but this doesn't have memory ordering semantics on !SMP systems. Instead, use dma_wmb(), which emits DMB OSHST. Strictly speaking, this does more than we require (since it targets the outer-shareable domain), but it's likely to be significantly faster than the DSB approach. Reported-by: Linu Cherian Suggested-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 8 ++++++-- drivers/iommu/io-pgtable-arm.c | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 690629457565..af330f513653 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -408,8 +408,12 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_V7S_ATTR_NS_TABLE; - /* Ensure the table itself is visible before its PTE can be */ - wmb(); + /* + * Ensure the table itself is visible before its PTE can be. + * Whilst we could get away with cmpxchg64_release below, this + * doesn't have any ordering semantics when !CONFIG_SMP. + */ + dma_wmb(); old = cmpxchg_relaxed(ptep, curr, new); __arm_v7s_pte_sync(ptep, 1, cfg); diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 52700fa958c2..b182039862c5 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -331,8 +331,12 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; - /* Ensure the table itself is visible before its PTE can be */ - wmb(); + /* + * Ensure the table itself is visible before its PTE can be. + * Whilst we could get away with cmpxchg64_release below, this + * doesn't have any ordering semantics when !CONFIG_SMP. 
+ */ + dma_wmb(); old = cmpxchg64_relaxed(ptep, curr, new); From 12275bf0a4deb690a5dc9903d207060737b7bae6 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 22 Jun 2017 21:20:54 +0200 Subject: [PATCH 415/436] iommu/arm-smmu-v3, acpi: Add temporary Cavium SMMU-V3 IORT model number definitions The model number is already defined in acpica and we are actually waiting for the acpi maintainers to include it: https://github.com/acpica/acpica/commit/d00a4eb86e64 Adding those temporary definitions until the change makes it into include/acpi/actbl2.h. Once that is done this patch can be reverted. Acked-by: Lorenzo Pieralisi Signed-off-by: Robert Richter Signed-off-by: Will Deacon --- drivers/acpi/arm64/iort.c | 5 +++++ drivers/iommu/arm-smmu-v3.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index c5fecf97ee2f..da225710b009 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -31,6 +31,11 @@ #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ (1 << ACPI_IORT_NODE_SMMU_V3)) +/* Until ACPICA headers cover IORT rev. C */ +#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX +#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 +#endif + struct iort_its_msi_chip { struct list_head list; struct fwnode_handle *fw_node; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 884b1a49a52a..da481d53e09b 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -413,6 +413,11 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +/* Until ACPICA headers cover IORT rev. C */ +#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX +#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 +#endif + static bool disable_bypass; module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, From 403e8c7c5bcaff3291a2c7012fe80f707a854d10 Mon Sep 17 00:00:00 2001 From: Linu Cherian Date: Thu, 22 Jun 2017 17:35:36 +0530 Subject: [PATCH 416/436] ACPI/IORT: Fixup SMMUv3 resource size for Cavium ThunderX2 SMMUv3 model Cavium ThunderX2 implementation doesn't support second page in SMMU register space. Hence, resource size is set as 64k for this model. Signed-off-by: Linu Cherian Signed-off-by: Geetha Sowjanya Signed-off-by: Will Deacon --- drivers/acpi/arm64/iort.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index da225710b009..a8ebda9f7e97 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -833,6 +833,18 @@ static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) return num_res; } +static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) +{ + /* + * Override the size, for Cavium ThunderX2 implementation + * which doesn't support the page 1 SMMU register space. 
+ */ + if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) + return SZ_64K; + + return SZ_128K; +} + static void __init arm_smmu_v3_init_resources(struct resource *res, struct acpi_iort_node *node) { @@ -843,7 +855,8 @@ static void __init arm_smmu_v3_init_resources(struct resource *res, smmu = (struct acpi_iort_smmu_v3 *)node->node_data; res[num_res].start = smmu->base_address; - res[num_res].end = smmu->base_address + SZ_128K - 1; + res[num_res].end = smmu->base_address + + arm_smmu_v3_resource_size(smmu) - 1; res[num_res].flags = IORESOURCE_MEM; num_res++; From e5b829de053d9994dfc8652ce558e90e3406c578 Mon Sep 17 00:00:00 2001 From: Linu Cherian Date: Thu, 22 Jun 2017 17:35:37 +0530 Subject: [PATCH 417/436] iommu/arm-smmu-v3: Add workaround for Cavium ThunderX2 erratum #74 Cavium ThunderX2 SMMU implementation doesn't support page 1 register space and PAGE0_REGS_ONLY option is enabled as an errata workaround. This option when turned on, replaces all page 1 offsets used for EVTQ_PROD/CONS, PRIQ_PROD/CONS register access with page 0 offsets. SMMU resource size checks are now based on SMMU option PAGE0_REGS_ONLY, since resource size can be either 64k/128k. For this, arm_smmu_device_dt_probe/acpi_probe has been moved before platform_get_resource call, so that SMMU options are set beforehand. Signed-off-by: Linu Cherian Signed-off-by: Geetha Sowjanya Signed-off-by: Will Deacon --- Documentation/arm64/silicon-errata.txt | 1 + .../devicetree/bindings/iommu/arm,smmu-v3.txt | 6 ++ drivers/iommu/arm-smmu-v3.c | 68 ++++++++++++++----- 3 files changed, 57 insertions(+), 18 deletions(-) diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 10f2dddbf449..4693a328947a 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -62,6 +62,7 @@ stable kernels. | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | +| Cavium | ThunderX2 SMMUv3| #74 | N/A | | | | | | | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 | | | | | | diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt index be57550e14e4..e7855cf6038e 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt @@ -49,6 +49,12 @@ the PCIe specification. - hisilicon,broken-prefetch-cmd : Avoid sending CMD_PREFETCH_* commands to the SMMU. +- cavium,cn9900-broken-page1-regspace + : Replaces all page 1 offsets used for EVTQ_PROD/CONS, + PRIQ_PROD/CONS register access with page 0 offsets. + Set for Cavium ThunderX2 silicon that doesn't support + SMMU page1 register space. 
+ ** Example smmu@2b400000 { diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index da481d53e09b..2d5b48b4260a 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -603,6 +603,7 @@ struct arm_smmu_device { u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) +#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) u32 options; struct arm_smmu_cmdq cmdq; @@ -668,9 +669,20 @@ struct arm_smmu_option_prop { static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, + { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, { 0, NULL}, }; +static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset, + struct arm_smmu_device *smmu) +{ + if ((offset > SZ_64K) && + (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)) + offset -= SZ_64K; + + return smmu->base + offset; +} + static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) { return container_of(dom, struct arm_smmu_domain, domain); @@ -1956,8 +1968,8 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, return -ENOMEM; } - q->prod_reg = smmu->base + prod_off; - q->cons_reg = smmu->base + cons_off; + q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); + q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu); q->ent_dwords = dwords; q->q_base = Q_BASE_RWA; @@ -2358,8 +2370,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Event queue */ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); - writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD); - writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS); + writel_relaxed(smmu->evtq.q.prod, + arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu)); + writel_relaxed(smmu->evtq.q.cons, + arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu)); enables |= CR0_EVTQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2374,9 +2388,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) writeq_relaxed(smmu->priq.q.q_base, smmu->base + ARM_SMMU_PRIQ_BASE); writel_relaxed(smmu->priq.q.prod, - smmu->base + ARM_SMMU_PRIQ_PROD); + arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu)); writel_relaxed(smmu->priq.q.cons, - smmu->base + ARM_SMMU_PRIQ_CONS); + arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu)); enables |= CR0_PRIQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2600,6 +2614,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } #ifdef CONFIG_ACPI +static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu) +{ + if (model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) + smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; + + dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); +} + static int arm_smmu_device_acpi_probe(struct platform_device *pdev, struct arm_smmu_device *smmu) { @@ -2612,6 +2634,8 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, /* Retrieve SMMUv3 specific data */ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data; + acpi_smmu_get_options(iort_smmu->model, smmu); + if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) smmu->features |= ARM_SMMU_FEAT_COHERENCY; @@ -2647,6 +2671,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, return ret; } +static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) +{ + if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY) + return SZ_64K; + else + return SZ_128K; +} + static int 
arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -2663,9 +2695,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } smmu->dev = dev; + if (dev->of_node) { + ret = arm_smmu_device_dt_probe(pdev, smmu); + } else { + ret = arm_smmu_device_acpi_probe(pdev, smmu); + if (ret == -ENODEV) + return ret; + } + + /* Set bypass mode according to firmware probing result */ + bypass = !!ret; + /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (resource_size(res) + 1 < SZ_128K) { + if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) { dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; } @@ -2692,17 +2735,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (irq > 0) smmu->gerr_irq = irq; - if (dev->of_node) { - ret = arm_smmu_device_dt_probe(pdev, smmu); - } else { - ret = arm_smmu_device_acpi_probe(pdev, smmu); - if (ret == -ENODEV) - return ret; - } - - /* Set bypass mode according to firmware probing result */ - bypass = !!ret; - /* Probe the h/w */ ret = arm_smmu_device_hw_probe(smmu); if (ret) From 99caf177f6fd3e67575f6ce05b36e8e041bcef60 Mon Sep 17 00:00:00 2001 From: shameer Date: Wed, 17 May 2017 10:12:05 +0100 Subject: [PATCH 418/436] iommu/arm-smmu-v3: Enable ACPI based HiSilicon CMD_PREFETCH quirk(erratum 161010701) HiSilicon SMMUv3 on Hip06/Hip07 platforms doesn't support CMD_PREFETCH command. The dt based support for this quirk is already present in the driver(hisilicon,broken-prefetch-cmd). This adds ACPI support for the quirk using the IORT smmu model number. Signed-off-by: shameer Signed-off-by: hanjun [will: rewrote patch] Signed-off-by: Will Deacon --- Documentation/arm64/silicon-errata.txt | 1 + drivers/iommu/arm-smmu-v3.c | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 4693a328947a..ef4e43590685 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -67,6 +67,7 @@ stable kernels. | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 | | | | | | | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | +| Hisilicon | Hip0{6,7} | #161010701 | N/A | | | | | | | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 2d5b48b4260a..81fc1b5c91ee 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -414,6 +414,10 @@ #define MSI_IOVA_LENGTH 0x100000 /* Until ACPICA headers cover IORT rev. 
C */ +#ifndef ACPI_IORT_SMMU_HISILICON_HI161X +#define ACPI_IORT_SMMU_HISILICON_HI161X 0x1 +#endif + #ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX #define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 #endif @@ -2616,8 +2620,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) #ifdef CONFIG_ACPI static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu) { - if (model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) + switch (model) { + case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX: smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; + break; + case ACPI_IORT_SMMU_HISILICON_HI161X: + smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH; + break; + } dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); } From f935448acf462c26142e8b04f1c8829b28d3b9d8 Mon Sep 17 00:00:00 2001 From: Geetha Sowjanya Date: Fri, 23 Jun 2017 19:04:36 +0530 Subject: [PATCH 419/436] iommu/arm-smmu-v3: Add workaround for Cavium ThunderX2 erratum #126 Cavium ThunderX2 SMMU doesn't support MSI and also doesn't have unique irq lines for gerror, eventq and cmdq-sync. New named irq "combined" is set as a errata workaround, which allows to share the irq line by register single irq handler for all the interrupts. Acked-by: Lorenzo Pieralisi Signed-off-by: Geetha sowjanya [will: reworked irq equality checking and added SPI check] Signed-off-by: Will Deacon --- Documentation/arm64/silicon-errata.txt | 1 + .../devicetree/bindings/iommu/arm,smmu-v3.txt | 6 ++ drivers/acpi/arm64/iort.c | 57 +++++++---- drivers/iommu/arm-smmu-v3.c | 94 ++++++++++++++----- 4 files changed, 118 insertions(+), 40 deletions(-) diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index ef4e43590685..856479525776 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -63,6 +63,7 @@ stable kernels. | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | | Cavium | ThunderX2 SMMUv3| #74 | N/A | +| Cavium | ThunderX2 SMMUv3| #126 | N/A | | | | | | | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 | | | | | | diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt index e7855cf6038e..c9abbf3e4f68 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt @@ -26,6 +26,12 @@ the PCIe specification. * "priq" - PRI Queue not empty * "cmdq-sync" - CMD_SYNC complete * "gerror" - Global Error activated + * "combined" - The combined interrupt is optional, + and should only be provided if the + hardware supports just a single, + combined interrupt line. + If provided, then the combined interrupt + will be used in preference to any others. - #iommu-cells : See the generic IOMMU binding described in devicetree/bindings/pci/pci-iommu.txt diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index a8ebda9f7e97..83d65d96676f 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -833,6 +833,24 @@ static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) return num_res; } +static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) +{ + /* + * Cavium ThunderX2 implementation doesn't not support unique + * irq line. Use single irq line for all the SMMUv3 interrupts. + */ + if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) + return false; + + /* + * ThunderX2 doesn't support MSIs from the SMMU, so we're checking + * SPI numbers here. 
+ */ + return smmu->event_gsiv == smmu->pri_gsiv && + smmu->event_gsiv == smmu->gerr_gsiv && + smmu->event_gsiv == smmu->sync_gsiv; +} + static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) { /* @@ -860,26 +878,33 @@ static void __init arm_smmu_v3_init_resources(struct resource *res, res[num_res].flags = IORESOURCE_MEM; num_res++; + if (arm_smmu_v3_is_combined_irq(smmu)) { + if (smmu->event_gsiv) + acpi_iort_register_irq(smmu->event_gsiv, "combined", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + } else { - if (smmu->event_gsiv) - acpi_iort_register_irq(smmu->event_gsiv, "eventq", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); + if (smmu->event_gsiv) + acpi_iort_register_irq(smmu->event_gsiv, "eventq", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); - if (smmu->pri_gsiv) - acpi_iort_register_irq(smmu->pri_gsiv, "priq", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); + if (smmu->pri_gsiv) + acpi_iort_register_irq(smmu->pri_gsiv, "priq", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); - if (smmu->gerr_gsiv) - acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); + if (smmu->gerr_gsiv) + acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); - if (smmu->sync_gsiv) - acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); + if (smmu->sync_gsiv) + acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + } } static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 81fc1b5c91ee..568c400eeaed 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -615,6 +615,7 @@ struct arm_smmu_device { struct arm_smmu_priq priq; int gerr_irq; + int combined_irq; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -1330,6 +1331,24 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) return IRQ_HANDLED; } +static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev) +{ + struct arm_smmu_device *smmu = dev; + + arm_smmu_evtq_thread(irq, dev); + if (smmu->features & ARM_SMMU_FEAT_PRI) + arm_smmu_priq_thread(irq, dev); + + return IRQ_HANDLED; +} + +static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev) +{ + arm_smmu_gerror_handler(irq, dev); + arm_smmu_cmdq_sync_handler(irq, dev); + return IRQ_WAKE_THREAD; +} + /* IO_PGTABLE API */ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) { @@ -2229,18 +2248,9 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) devm_add_action(dev, arm_smmu_free_msis, dev); } -static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) { - int ret, irq; - u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; - - /* Disable IRQs first */ - ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL, - ARM_SMMU_IRQ_CTRLACK); - if (ret) { - dev_err(smmu->dev, "failed to disable irqs\n"); - return ret; - } + int irq, ret; arm_smmu_setup_msis(smmu); @@ -2283,10 +2293,41 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (ret < 0) dev_warn(smmu->dev, "failed to enable priq irq\n"); - else - irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; } } +} + +static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +{ + int ret, irq; + u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; + + /* Disable IRQs first */ + ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL, + 
ARM_SMMU_IRQ_CTRLACK); + if (ret) { + dev_err(smmu->dev, "failed to disable irqs\n"); + return ret; + } + + irq = smmu->combined_irq; + if (irq) { + /* + * Cavium ThunderX2 implementation doesn't not support unique + * irq lines. Use single irq line for all the SMMUv3 interrupts. + */ + ret = devm_request_threaded_irq(smmu->dev, irq, + arm_smmu_combined_irq_handler, + arm_smmu_combined_irq_thread, + IRQF_ONESHOT, + "arm-smmu-v3-combined-irq", smmu); + if (ret < 0) + dev_warn(smmu->dev, "failed to enable combined irq\n"); + } else + arm_smmu_setup_unique_irqs(smmu); + + if (smmu->features & ARM_SMMU_FEAT_PRI) + irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; /* Enable interrupt generation on the SMMU */ ret = arm_smmu_write_reg_sync(smmu, irqen_flags, @@ -2729,22 +2770,27 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return PTR_ERR(smmu->base); /* Interrupt lines */ - irq = platform_get_irq_byname(pdev, "eventq"); - if (irq > 0) - smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "priq"); + irq = platform_get_irq_byname(pdev, "combined"); if (irq > 0) - smmu->priq.q.irq = irq; + smmu->combined_irq = irq; + else { + irq = platform_get_irq_byname(pdev, "eventq"); + if (irq > 0) + smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "cmdq-sync"); - if (irq > 0) - smmu->cmdq.q.irq = irq; + irq = platform_get_irq_byname(pdev, "priq"); + if (irq > 0) + smmu->priq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "gerror"); - if (irq > 0) - smmu->gerr_irq = irq; + irq = platform_get_irq_byname(pdev, "cmdq-sync"); + if (irq > 0) + smmu->cmdq.q.irq = irq; + irq = platform_get_irq_byname(pdev, "gerror"); + if (irq > 0) + smmu->gerr_irq = irq; + } /* Probe the h/w */ ret = arm_smmu_device_hw_probe(smmu); if (ret) From c891d9f6bf2a78c9c657656872a60807820db4c8 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Fri, 23 Jun 2017 15:08:38 -0700 Subject: [PATCH 420/436] mm, thp: remove cond_resched from __collapse_huge_page_copy This is a partial revert of commit 338a16ba1549 ("mm, thp: copying user pages must schedule on collapse") which added a cond_resched() to __collapse_huge_page_copy(). On x86 with CONFIG_HIGHPTE, __collapse_huge_page_copy is called in atomic context and thus scheduling is not possible. This is only a possible config on arm and i386. Although need_resched has been shown to be set for over 100 jiffies while doing the iteration in __collapse_huge_page_copy, this is better than doing if (in_atomic()) cond_resched() to cover only non-CONFIG_HIGHPTE configs. 
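A simplified illustration of the constraint (not the actual khugepaged code; copy_one_pte_page() is a hypothetical helper): under CONFIG_HIGHPTE the PTE page is mapped with kmap_atomic() via pte_offset_map(), so the copy loop runs with preemption disabled and must not sleep.

static void copy_ptes_atomic(pmd_t *pmd, unsigned long addr,
                             unsigned long end)
{
        pte_t *start = pte_offset_map(pmd, addr);  /* kmap_atomic() if HIGHPTE */
        pte_t *pte;

        for (pte = start; addr < end; pte++, addr += PAGE_SIZE) {
                copy_one_pte_page(pte, addr);      /* hypothetical helper */
                /* a cond_resched() here would sleep in atomic context */
        }
        pte_unmap(start);                          /* kunmap_atomic() if HIGHPTE */
}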
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1706191341550.97821@chino.kir.corp.google.com Signed-off-by: David Rientjes Reported-by: Larry Finger Tested-by: Larry Finger Acked-by: Michal Hocko Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/khugepaged.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 945fd1ca49b5..df4ebdb2b10a 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, spin_unlock(ptl); free_page_and_swap_cache(src_page); } - cond_resched(); } } From 029c54b09599573015a5c18dbe59cbdf42742237 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 23 Jun 2017 15:08:41 -0700 Subject: [PATCH 421/436] mm/vmalloc.c: huge-vmap: fail gracefully on unexpected huge vmap mappings Existing code that uses vmalloc_to_page() may assume that any address for which is_vmalloc_addr() returns true may be passed into vmalloc_to_page() to retrieve the associated struct page. This is not un unreasonable assumption to make, but on architectures that have CONFIG_HAVE_ARCH_HUGE_VMAP=y, it no longer holds, and we need to ensure that vmalloc_to_page() does not go off into the weeds trying to dereference huge PUDs or PMDs as table entries. Given that vmalloc() and vmap() themselves never create huge mappings or deal with compound pages at all, there is no correct answer in this case, so return NULL instead, and issue a warning. When reading /proc/kcore on arm64, you will hit an oops as soon as you hit the huge mappings used for the various segments that make up the mapping of vmlinux. With this patch applied, you will no longer hit the oops, but the kcore contents willl be incorrect (these regions will be zeroed out) We are fixing this for kcore specifically, so it avoids vread() for those regions. At least one other problematic user exists, i.e., /dev/kmem, but that is currently broken on arm64 for other reasons. Link: http://lkml.kernel.org/r/20170609082226.26152-1-ard.biesheuvel@linaro.org Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Reviewed-by: Laura Abbott Cc: Michal Hocko Cc: zhong jiang Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 34a1c3e46ed7..ecc97f74ab18 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) if (p4d_none(*p4d)) return NULL; pud = pud_offset(p4d, addr); - if (pud_none(*pud)) + + /* + * Don't dereference bad PUD or PMD (below) entries. This will also + * identify huge mappings, which we may encounter on architectures + * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be + * identified as vmalloc addresses by is_vmalloc_addr(), but are + * not [unambiguously] associated with a struct page, so there is + * no correct value to return for them. 
+ */ + WARN_ON_ONCE(pud_bad(*pud)); + if (pud_none(*pud) || pud_bad(*pud)) return NULL; pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + WARN_ON_ONCE(pmd_bad(*pmd)); + if (pmd_none(*pmd) || pmd_bad(*pmd)) return NULL; ptep = pte_offset_map(pmd, addr); From 9fa4eb8e490a28de40964b1b0e583d8db4c7e57c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 23 Jun 2017 15:08:43 -0700 Subject: [PATCH 422/436] autofs: sanity check status reported with AUTOFS_DEV_IOCTL_FAIL If a positive status is passed with the AUTOFS_DEV_IOCTL_FAIL ioctl, autofs4_d_automount() will return ERR_PTR(status) with that status to follow_automount(), which will then dereference an invalid pointer. So treat a positive status the same as zero, and map to ENOENT. See comment in systemd src/core/automount.c::automount_send_ready(). Link: http://lkml.kernel.org/r/871sqwczx5.fsf@notabene.neil.brown.name Signed-off-by: NeilBrown Cc: Ian Kent Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/autofs4/dev-ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 734cbf8d9676..dd9f1bebb5a3 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp, int status; token = (autofs_wqt_t) param->fail.token; - status = param->fail.status ? param->fail.status : -ENOENT; + status = param->fail.status < 0 ? param->fail.status : -ENOENT; return autofs4_wait_release(sbi, token, status); } From 1eb643d02b21412e603b42cdd96010a2ac31c05f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 23 Jun 2017 15:08:46 -0700 Subject: [PATCH 423/436] fs/dax.c: fix inefficiency in dax_writeback_mapping_range() dax_writeback_mapping_range() fails to update iteration index when searching radix tree for entries needing cache flushing. Thus each pagevec worth of entries is searched starting from the start which is inefficient and prone to livelocks. Update index properly. Link: http://lkml.kernel.org/r/20170619124531.21491-1-jack@suse.cz Fixes: 9973c98ecfda3 ("dax: add support for fsync/sync") Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Cc: Dan Williams Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/dax.c b/fs/dax.c index 2a6889b3585f..9187f3b07f3e 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping, if (ret < 0) goto out; } + start_index = indices[pvec.nr - 1] + 1; } out: put_dax(dax_dev); From a91e0f680bcd9e10c253ae8b62462a38bd48f09f Mon Sep 17 00:00:00 2001 From: Ilya Matveychikov Date: Fri, 23 Jun 2017 15:08:49 -0700 Subject: [PATCH 424/436] lib/cmdline.c: fix get_options() overflow while parsing ranges When using get_options() it's possible to specify a range of numbers, like 1-100500. The problem is that it doesn't track array size while calling internally to get_range() which iterates over the range and fills the memory with numbers. Link: http://lkml.kernel.org/r/2613C75C-B04D-4BFF-82A6-12F97BA0F620@gmail.com Signed-off-by: Ilya V. Matveychikov Cc: Jonathan Corbet Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/cmdline.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/cmdline.c b/lib/cmdline.c index 3c6432df7e63..4c0888c4a68d 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -23,14 +23,14 @@ * the values[M, M+1, ..., N] into the ints array in get_options. 
*/ -static int get_range(char **str, int *pint) +static int get_range(char **str, int *pint, int n) { int x, inc_counter, upper_range; (*str)++; upper_range = simple_strtol((*str), NULL, 0); inc_counter = upper_range - *pint; - for (x = *pint; x < upper_range; x++) + for (x = *pint; n && x < upper_range; x++, n--) *pint++ = x; return inc_counter; } @@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints) break; if (res == 3) { int range_nums; - range_nums = get_range((char **)&str, ints + i); + range_nums = get_range((char **)&str, ints + i, nints - i); if (range_nums < 0) break; /* From 3b7b314053d021601940c50b07f5f1423ae67e21 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 23 Jun 2017 15:08:52 -0700 Subject: [PATCH 425/436] slub: make sysfs file removal asynchronous Commit bf5eb3de3847 ("slub: separate out sysfs_slab_release() from sysfs_slab_remove()") made slub sysfs file removals synchronous to kmem_cache shutdown. Unfortunately, this created a possible ABBA deadlock between slab_mutex and sysfs draining mechanism triggering the following lockdep warning. ====================================================== [ INFO: possible circular locking dependency detected ] 4.10.0-test+ #48 Not tainted ------------------------------------------------------- rmmod/1211 is trying to acquire lock: (s_active#120){++++.+}, at: [] kernfs_remove+0x23/0x40 but task is already holding lock: (slab_mutex){+.+.+.}, at: [] kmem_cache_destroy+0x41/0x2d0 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (slab_mutex){+.+.+.}: lock_acquire+0xf6/0x1f0 __mutex_lock+0x75/0x950 mutex_lock_nested+0x1b/0x20 slab_attr_store+0x75/0xd0 sysfs_kf_write+0x45/0x60 kernfs_fop_write+0x13c/0x1c0 __vfs_write+0x28/0x120 vfs_write+0xc8/0x1e0 SyS_write+0x49/0xa0 entry_SYSCALL_64_fastpath+0x1f/0xc2 -> #0 (s_active#120){++++.+}: __lock_acquire+0x10ed/0x1260 lock_acquire+0xf6/0x1f0 __kernfs_remove+0x254/0x320 kernfs_remove+0x23/0x40 sysfs_remove_dir+0x51/0x80 kobject_del+0x18/0x50 __kmem_cache_shutdown+0x3e6/0x460 kmem_cache_destroy+0x1fb/0x2d0 kvm_exit+0x2d/0x80 [kvm] vmx_exit+0x19/0xa1b [kvm_intel] SyS_delete_module+0x198/0x1f0 entry_SYSCALL_64_fastpath+0x1f/0xc2 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(slab_mutex); lock(s_active#120); lock(slab_mutex); lock(s_active#120); *** DEADLOCK *** 2 locks held by rmmod/1211: #0: (cpu_hotplug.dep_map){++++++}, at: [] get_online_cpus+0x37/0x80 #1: (slab_mutex){+.+.+.}, at: [] kmem_cache_destroy+0x41/0x2d0 stack backtrace: CPU: 3 PID: 1211 Comm: rmmod Not tainted 4.10.0-test+ #48 Hardware name: Hewlett-Packard HP Compaq Pro 6300 SFF/339A, BIOS K01 v02.05 05/07/2012 Call Trace: print_circular_bug+0x1be/0x210 __lock_acquire+0x10ed/0x1260 lock_acquire+0xf6/0x1f0 __kernfs_remove+0x254/0x320 kernfs_remove+0x23/0x40 sysfs_remove_dir+0x51/0x80 kobject_del+0x18/0x50 __kmem_cache_shutdown+0x3e6/0x460 kmem_cache_destroy+0x1fb/0x2d0 kvm_exit+0x2d/0x80 [kvm] vmx_exit+0x19/0xa1b [kvm_intel] SyS_delete_module+0x198/0x1f0 ? SyS_delete_module+0x5/0x1f0 entry_SYSCALL_64_fastpath+0x1f/0xc2 It'd be the cleanest to deal with the issue by removing sysfs files without holding slab_mutex before the rest of shutdown; however, given the current code structure, it is pretty difficult to do so. This patch punts sysfs file removal to a work item. Before commit bf5eb3de3847, the removal was punted to a RCU delayed work item which is executed after release. 
Now, we're punting to a different work item on shutdown which still maintains the goal removing the sysfs files earlier when destroying kmem_caches. Link: http://lkml.kernel.org/r/20170620204512.GI21326@htj.duckdns.org Fixes: bf5eb3de3847 ("slub: separate out sysfs_slab_release() from sysfs_slab_remove()") Signed-off-by: Tejun Heo Reported-by: Steven Rostedt (VMware) Tested-by: Steven Rostedt (VMware) Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 1 + mm/slub.c | 40 ++++++++++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 07ef550c6627..93315d6b21a8 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -84,6 +84,7 @@ struct kmem_cache { int red_left_pad; /* Left redzone padding size */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ + struct work_struct kobj_remove_work; #endif #ifdef CONFIG_MEMCG struct memcg_cache_params memcg_params; diff --git a/mm/slub.c b/mm/slub.c index 7449593fca72..8addc535bcdc 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s) return name; } +static void sysfs_slab_remove_workfn(struct work_struct *work) +{ + struct kmem_cache *s = + container_of(work, struct kmem_cache, kobj_remove_work); + + if (!s->kobj.state_in_sysfs) + /* + * For a memcg cache, this may be called during + * deactivation and again on shutdown. Remove only once. + * A cache is never shut down before deactivation is + * complete, so no need to worry about synchronization. + */ + return; + +#ifdef CONFIG_MEMCG + kset_unregister(s->memcg_kset); +#endif + kobject_uevent(&s->kobj, KOBJ_REMOVE); + kobject_del(&s->kobj); + kobject_put(&s->kobj); +} + static int sysfs_slab_add(struct kmem_cache *s) { int err; @@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s) struct kset *kset = cache_kset(s); int unmergeable = slab_unmergeable(s); + INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); + if (!kset) { kobject_init(&s->kobj, &slab_ktype); return 0; @@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s) */ return; - if (!s->kobj.state_in_sysfs) - /* - * For a memcg cache, this may be called during - * deactivation and again on shutdown. Remove only once. - * A cache is never shut down before deactivation is - * complete, so no need to worry about synchronization. - */ - return; - -#ifdef CONFIG_MEMCG - kset_unregister(s->memcg_kset); -#endif - kobject_uevent(&s->kobj, KOBJ_REMOVE); - kobject_del(&s->kobj); + kobject_get(&s->kobj); + schedule_work(&s->kobj_remove_work); } void sysfs_slab_release(struct kmem_cache *s) From 8818efaaacb78c60a9d90c5705b6c99b75d7d442 Mon Sep 17 00:00:00 2001 From: Eric Ren Date: Fri, 23 Jun 2017 15:08:55 -0700 Subject: [PATCH 426/436] ocfs2: fix deadlock caused by recursive locking in xattr Another deadlock path caused by recursive locking is reported. This kind of issue was introduced since commit 743b5f1434f5 ("ocfs2: take inode lock in ocfs2_iop_set/get_acl()"). Two deadlock paths have been fixed by commit b891fa5024a9 ("ocfs2: fix deadlock issue when taking inode lock at vfs entry points"). Yes, we intend to fix this kind of case in incremental way, because it's hard to find out all possible paths at once. This one can be reproduced like this. On node1, cp a large file from home directory to ocfs2 mountpoint. 
While on node2, run setfacl/getfacl. Both nodes will hang up there. The backtraces: On node1: __ocfs2_cluster_lock.isra.39+0x357/0x740 [ocfs2] ocfs2_inode_lock_full_nested+0x17d/0x840 [ocfs2] ocfs2_write_begin+0x43/0x1a0 [ocfs2] generic_perform_write+0xa9/0x180 __generic_file_write_iter+0x1aa/0x1d0 ocfs2_file_write_iter+0x4f4/0xb40 [ocfs2] __vfs_write+0xc3/0x130 vfs_write+0xb1/0x1a0 SyS_write+0x46/0xa0 On node2: __ocfs2_cluster_lock.isra.39+0x357/0x740 [ocfs2] ocfs2_inode_lock_full_nested+0x17d/0x840 [ocfs2] ocfs2_xattr_set+0x12e/0xe80 [ocfs2] ocfs2_set_acl+0x22d/0x260 [ocfs2] ocfs2_iop_set_acl+0x65/0xb0 [ocfs2] set_posix_acl+0x75/0xb0 posix_acl_xattr_set+0x49/0xa0 __vfs_setxattr+0x69/0x80 __vfs_setxattr_noperm+0x72/0x1a0 vfs_setxattr+0xa7/0xb0 setxattr+0x12d/0x190 path_setxattr+0x9f/0xb0 SyS_setxattr+0x14/0x20 Fix this one by using ocfs2_inode_{lock|unlock}_tracker, which is exported by commit 439a36b8ef38 ("ocfs2/dlmglue: prepare tracking logic to avoid recursive cluster lock"). Link: http://lkml.kernel.org/r/20170622014746.5815-1-zren@suse.com Fixes: 743b5f1434f5 ("ocfs2: take inode lock in ocfs2_iop_set/get_acl()") Signed-off-by: Eric Ren Reported-by: Thomas Voegtle Tested-by: Thomas Voegtle Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlmglue.c | 4 ++++ fs/ocfs2/xattr.c | 23 +++++++++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 3b7c937a36b5..4689940a953c 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, struct ocfs2_lock_res *lockres; lockres = &OCFS2_I(inode)->ip_inode_lockres; + /* had_lock means that the currect process already takes the cluster + * lock previously. If had_lock is 1, we have nothing to do here, and + * it will get unlocked where we got the lock. 
+ */ if (!had_lock) { ocfs2_remove_holder(lockres, oh); ocfs2_inode_unlock(inode, ex); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3c5384d9b3a5..f70c3778d600 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode, void *buffer, size_t buffer_size) { - int ret; + int ret, had_lock; struct buffer_head *di_bh = NULL; + struct ocfs2_lock_holder oh; - ret = ocfs2_inode_lock(inode, &di_bh, 0); - if (ret < 0) { - mlog_errno(ret); - return ret; + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh); + if (had_lock < 0) { + mlog_errno(had_lock); + return had_lock; } down_read(&OCFS2_I(inode)->ip_xattr_sem); ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, name, buffer, buffer_size); up_read(&OCFS2_I(inode)->ip_xattr_sem); - ocfs2_inode_unlock(inode, 0); + ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock); brelse(di_bh); @@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode, { struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; - int ret, credits, ref_meta = 0, ref_credits = 0; + int ret, credits, had_lock, ref_meta = 0, ref_credits = 0; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; struct ocfs2_refcount_tree *ref_tree = NULL; + struct ocfs2_lock_holder oh; struct ocfs2_xattr_info xi = { .xi_name_index = name_index, @@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode, return -ENOMEM; } - ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (ret < 0) { + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh); + if (had_lock < 0) { + ret = had_lock; mlog_errno(ret); goto cleanup_nolock; } @@ -3670,7 +3673,7 @@ cleanup: if (ret) mlog_errno(ret); } - ocfs2_inode_unlock(inode, 1); + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); cleanup_nolock: brelse(di_bh); brelse(xbs.xattr_bh); From 98da7d08850fb8bdeb395d6368ed15753304aa0c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 23 Jun 2017 15:08:57 -0700 Subject: [PATCH 427/436] fs/exec.c: account for argv/envp pointers When limiting the argv/envp strings during exec to 1/4 of the stack limit, the storage of the pointers to the strings was not included. This means that an exec with huge numbers of tiny strings could eat 1/4 of the stack limit in strings and then additional space would be later used by the pointers to the strings. For example, on 32-bit with a 8MB stack rlimit, an exec with 1677721 single-byte strings would consume less than 2MB of stack, the max (8MB / 4) amount allowed, but the pointers to the strings would consume the remaining additional stack space (1677721 * 4 == 6710884). The result (1677721 + 6710884 == 8388605) would exhaust stack space entirely. Controlling this stack exhaustion could result in pathological behavior in setuid binaries (CVE-2017-1000365). 
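To make the arithmetic above concrete, here is a small standalone sketch (ordinary userspace C, not kernel code) that reproduces the 32-bit example with sizeof(void *) == 4; the variable names are invented for the illustration.

#include <stdio.h>

int main(void)
{
	unsigned long rlim_stack   = 8UL * 1024 * 1024;	/* 8 MiB RLIMIT_STACK */
	unsigned long old_limit    = rlim_stack / 4;	/* pre-patch check: strings only */
	unsigned long nstrings     = 1677721UL;
	unsigned long string_bytes = nstrings;		/* single-byte strings, < 2 MiB */
	unsigned long ptr_bytes    = nstrings * 4;	/* argv pointers: 6710884 bytes */

	printf("strings alone: %lu bytes, old limit %lu -> passes\n",
	       string_bytes, old_limit);
	printf("strings + pointers: %lu of %lu stack bytes\n",
	       string_bytes + ptr_bytes, rlim_stack);
	return 0;
}

The patched get_arg_page() closes the gap by folding (argc + envc) * sizeof(void *) into the size that is compared against the quarter-of-RLIMIT_STACK limit, as the diff below shows.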
[akpm@linux-foundation.org: additional commenting from Kees] Fixes: b6a2fea39318 ("mm: variable length argument support") Link: http://lkml.kernel.org/r/20170622001720.GA32173@beast Signed-off-by: Kees Cook Acked-by: Rik van Riel Acked-by: Michal Hocko Cc: Alexander Viro Cc: Qualys Security Advisory Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 72934df68471..904199086490 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; + unsigned long ptr_size; struct rlimit *rlim; + /* + * Since the stack will hold pointers to the strings, we + * must account for them as well. + * + * The size calculation is the entire vma while each arg page is + * built, so each time we get here it's calculating how far it + * is currently (rather than each call being just the newly + * added size from the arg page). As a result, we need to + * always add the entire size of the pointers, so that on the + * last call to get_arg_page() we'll actually have the entire + * correct size. + */ + ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + if (ptr_size > ULONG_MAX - size) + goto fail; + size += ptr_size; + acct_arg_size(bprm, size / PAGE_SIZE); /* @@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * to work from. */ rlim = current->signal->rlim; - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { - put_page(page); - return NULL; - } + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + goto fail; } return page; + +fail: + put_page(page); + return NULL; } static void put_arg_page(struct page *page) From 26fcd952d5c977a94ac64bb44ed409e37607b2c9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 23 Jun 2017 10:50:38 +0200 Subject: [PATCH 428/436] x86/mshyperv: Remove excess #includes from mshyperv.h A recent commit included linux/slab.h in linux/irq.h. This breaks the build of vdso32 on a 64-bit kernel. The reason is that linux/irq.h gets included into the vdso code via linux/interrupt.h which is included from asm/mshyperv.h. That makes the 32-bit vdso compile fail, because slab.h includes the pgtable headers for 64-bit on a 64-bit build. Neither linux/clocksource.h nor linux/interrupt.h are needed in the mshyperv.h header file itself - it has a dependency on . Remove the includes and unbreak the build. Reported-by: Ingo Molnar Signed-off-by: Thomas Gleixner Cc: K. Y. 
Srinivasan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Vitaly Kuznetsov Cc: devel@linuxdriverproject.org Fixes: dee863b571b0 ("hv: export current Hyper-V clocksource") Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1706231038460.2647@nanos Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mshyperv.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index fba100713924..d5acc27ed1cc 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -2,8 +2,7 @@ #define _ASM_X86_MSHYPER_H #include -#include -#include +#include #include /* From c0bc126f97fb929b3ae02c1c62322645d70eb408 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 25 Jun 2017 18:30:05 -0700 Subject: [PATCH 429/436] Linux 4.12-rc7 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 57df7569c1bf..6d8a984ed9c9 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Fearless Coyote # *DOCUMENTATION* From aaffaa8a3b5950c47e5f7573c34bc47de8894a18 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 27 Jun 2017 18:16:47 +0200 Subject: [PATCH 430/436] iommu/iova: Don't disable preempt around this_cpu_ptr() Commit 583248e6620a ("iommu/iova: Disable preemption around use of this_cpu_ptr()") disables preemption while accessing a per-CPU variable. This does keep lockdep quiet. However I don't see the point why it is bad if we get migrated after its access to another CPU. __iova_rcache_insert() and __iova_rcache_get() immediately locks the variable after obtaining it - before accessing its members. _If_ we get migrated away after retrieving the address of cpu_rcache before taking the lock then the *other* task on the same CPU will retrieve the same address of cpu_rcache and will spin on the lock. alloc_iova_fast() disables preemption while invoking free_cpu_cached_iovas() on each CPU. The function itself uses per_cpu_ptr() which does not trigger a warning (like this_cpu_ptr() does). It _could_ make sense to use get_online_cpus() instead but the we have a hotplug notifier for CPU down (and none for up) so we are good. Cc: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: Andrew Morton Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Joerg Roedel --- drivers/iommu/iova.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 5c88ba70e4e0..f0ff0aa04081 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -22,6 +22,7 @@ #include #include #include +#include static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, @@ -398,10 +399,8 @@ retry: /* Try replenishing IOVAs by flushing rcache. 
*/ flushed_rcache = true; - preempt_disable(); for_each_online_cpu(cpu) free_cpu_cached_iovas(cpu, iovad); - preempt_enable(); goto retry; } @@ -729,7 +728,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, bool can_insert = false; unsigned long flags; - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_full(cpu_rcache->loaded)) { @@ -759,7 +758,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, iova_magazine_push(cpu_rcache->loaded, iova_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); - put_cpu_ptr(rcache->cpu_rcaches); if (mag_to_free) { iova_magazine_free_pfns(mag_to_free, iovad); @@ -793,7 +791,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, bool has_pfn = false; unsigned long flags; - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_empty(cpu_rcache->loaded)) { @@ -815,7 +813,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); - put_cpu_ptr(rcache->cpu_rcaches); return iova_pfn; } From 58c4a95f90839624b67f67acdb8a129f4383b569 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 27 Jun 2017 18:16:48 +0200 Subject: [PATCH 431/436] iommu/vt-d: Don't disable preemption while accessing deferred_flush() get_cpu() disables preemption and returns the current CPU number. The CPU number is only used once while retrieving the address of the local's CPU deferred_flush pointer. We can instead use raw_cpu_ptr() while we remain preemptible. The worst thing that can happen is that flush_unmaps_timeout() is invoked multiple times: once by taskA after seeing HIGH_WATER_MARK and then preempted to another CPU and then by taskB which saw HIGH_WATER_MARK on the same CPU as taskA. It is also likely that ->size got from HIGH_WATER_MARK to 0 right after its read because another CPU invoked flush_unmaps_timeout() for this CPU. The access to flush_data is protected by a spinlock so even if we get migrated to another CPU or preempted - the data structure is protected. While at it, I marked deferred_flush static since I can't find a reference to it outside of this file. Cc: David Woodhouse Cc: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: Andrew Morton Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ee9c258d3ae0..0ca985b418ec 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -481,7 +481,7 @@ struct deferred_flush_data { struct deferred_flush_table *tables; }; -DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); +static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); /* bitmap for indexing intel_iommus */ static int g_num_of_iommus; @@ -3710,10 +3710,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, struct intel_iommu *iommu; struct deferred_flush_entry *entry; struct deferred_flush_data *flush_data; - unsigned int cpuid; - cpuid = get_cpu(); - flush_data = per_cpu_ptr(&deferred_flush, cpuid); + flush_data = raw_cpu_ptr(&deferred_flush); /* Flush all CPUs' entries to avoid deferring too much. 
If * this becomes a bottleneck, can just flush us, and rely on @@ -3746,8 +3744,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, } flush_data->size++; spin_unlock_irqrestore(&flush_data->lock, flags); - - put_cpu(); } static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) From 0929deca40bbdd7e821aada2906ac67882cfbf28 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 15 Jun 2017 15:11:51 +0200 Subject: [PATCH 432/436] iommu/s390: Use iommu_group_get_for_dev() in s390_iommu_add_device() The iommu_group_get_for_dev() function also attaches the device to its group, so this code doesn't need to be in the iommu driver. Further by using this function the driver can make use of default domains in the future. Reviewed-by: Gerald Schaefer Signed-off-by: Joerg Roedel --- drivers/iommu/s390-iommu.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 179e636a4d91..8788640756a7 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -165,20 +165,14 @@ static void s390_iommu_detach_device(struct iommu_domain *domain, static int s390_iommu_add_device(struct device *dev) { - struct iommu_group *group; - int rc; + struct iommu_group *group = iommu_group_get_for_dev(dev); - group = iommu_group_get(dev); - if (!group) { - group = iommu_group_alloc(); - if (IS_ERR(group)) - return PTR_ERR(group); - } + if (IS_ERR(group)) + return PTR_ERR(group); - rc = iommu_group_add_device(group, dev); iommu_group_put(group); - return rc; + return 0; } static void s390_iommu_remove_device(struct device *dev) @@ -344,6 +338,7 @@ static struct iommu_ops s390_iommu_ops = { .iova_to_phys = s390_iommu_iova_to_phys, .add_device = s390_iommu_add_device, .remove_device = s390_iommu_remove_device, + .device_group = generic_device_group, .pgsize_bitmap = S390_IOMMU_PGSIZES, }; From 7f7a2304aabc4a8102bbbbeed2ec9eaee4a480c2 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 28 Jun 2017 12:45:31 +0200 Subject: [PATCH 433/436] iommu: Return ERR_PTR() values from device_group call-backs The generic device_group call-backs in iommu.c return NULL in case of error. Since they are getting ERR_PTR values from iommu_group_alloc(), just pass them up instead. Reported-by: Gerald Schaefer Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index cf7ca7e70777..de09e1e35830 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -915,13 +915,7 @@ static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) */ struct iommu_group *generic_device_group(struct device *dev) { - struct iommu_group *group; - - group = iommu_group_alloc(); - if (IS_ERR(group)) - return NULL; - - return group; + return iommu_group_alloc(); } /* @@ -988,11 +982,7 @@ struct iommu_group *pci_device_group(struct device *dev) return group; /* No shared group found, allocate new */ - group = iommu_group_alloc(); - if (IS_ERR(group)) - return NULL; - - return group; + return iommu_group_alloc(); } /** From 8faf5e5a12c511410de1590cf310ec331c5ec7b1 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 28 Jun 2017 12:50:16 +0200 Subject: [PATCH 434/436] iommu/omap: Return ERR_PTR in device_group call-back Make sure that the device_group callback returns an ERR_PTR instead of NULL. 
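As a purely illustrative sketch (not the omap code, which follows below), the convention these last patches establish for a device_group callback looks roughly like this; example_device_group and the dev->bus test are made-up placeholders for whatever per-device check a real driver performs.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

static struct iommu_group *example_device_group(struct device *dev)
{
	/* Stand-in for a real driver's per-device capability check. */
	if (!dev->bus)
		return ERR_PTR(-EINVAL);	/* error pointer, never NULL */

	/* iommu_group_alloc() itself returns an ERR_PTR on failure. */
	return iommu_group_alloc();
}

Callers such as iommu_group_get_for_dev() only test the result with IS_ERR(), so a NULL return would be mistaken for a valid group pointer; the WARN_ON_ONCE added in the next patch catches any callback that still slips one through.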
Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 95dfca36ccb9..641e035cf866 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1309,7 +1309,7 @@ static void omap_iommu_remove_device(struct device *dev) static struct iommu_group *omap_iommu_device_group(struct device *dev) { struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; - struct iommu_group *group = NULL; + struct iommu_group *group = ERR_PTR(-EINVAL); if (arch_data->iommu_dev) group = arch_data->iommu_dev->group; From 72dcac633475a5b331cf21f3525467d0e123395a Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 28 Jun 2017 12:52:48 +0200 Subject: [PATCH 435/436] iommu: Warn once when device_group callback returns NULL This callback should never return NULL. Print a warning if that happens so that we notice and can fix it. Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index de09e1e35830..3f6ea160afed 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1010,6 +1010,9 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (ops && ops->device_group) group = ops->device_group(dev); + if (WARN_ON_ONCE(group == NULL)) + return ERR_PTR(-EINVAL); + if (IS_ERR(group)) return group; From 01e1932a1748e20b69ba23d0a01db5eb3a525782 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 28 Jun 2017 16:39:32 +0530 Subject: [PATCH 436/436] iommu/vt-d: Constify intel_dma_ops Most dma_map_ops structures are never modified. Constify these structures such that these can be write-protected. Signed-off-by: Arvind Yadav Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 0ca985b418ec..1a79a4ec6f09 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3954,7 +3954,7 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) return !dma_addr; } -struct dma_map_ops intel_dma_ops = { +const struct dma_map_ops intel_dma_ops = { .alloc = intel_alloc_coherent, .free = intel_free_coherent, .map_sg = intel_map_sg,