Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Author: David S. Miller
Date:   2018-11-24 17:01:43 -08:00
commit b1bf78bfb2
197 changed files with 1788 additions and 743 deletions


@ -4713,6 +4713,8 @@
prevent spurious wakeup);
n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
pause after every control message);
o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
delay after resetting its port);
Example: quirks=0781:5580:bk,0a5c:5834:gij
usbhid.mousepoll=


@ -32,16 +32,17 @@ Disclosure and embargoed information
The security list is not a disclosure channel. For that, see Coordination
below.
-Once a robust fix has been developed, our preference is to release the
-fix in a timely fashion, treating it no differently than any of the other
-thousands of changes and fixes the Linux kernel project releases every
-month.
+Once a robust fix has been developed, the release process starts. Fixes
+for publicly known bugs are released immediately.
-However, at the request of the reporter, we will postpone releasing the
-fix for up to 5 business days after the date of the report or after the
-embargo has lifted; whichever comes first. The only exception to that
-rule is if the bug is publicly known, in which case the preference is to
-release the fix as soon as it's available.
+Although our preference is to release fixes for publicly undisclosed bugs
+as soon as they become available, this may be postponed at the request of
+the reporter or an affected party for up to 7 calendar days from the start
+of the release process, with an exceptional extension to 14 calendar days
+if it is agreed that the criticality of the bug requires more time. The
+only valid reason for deferring the publication of a fix is to accommodate
+the logistics of QA and large scale rollouts which require release
+coordination.
Whilst embargoed information may be shared with trusted individuals in
order to develop a fix, such information will not be published alongside


@ -7,7 +7,7 @@ limitations.
Current Binding
---------------
-Switches are true Linux devices and can be probes by any means. Once
+Switches are true Linux devices and can be probed by any means. Once
probed, they register to the DSA framework, passing a node
pointer. This node is expected to fulfil the following binding, and
may contain additional properties as required by the device it is


@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
the desired operation. Both drivers and applications must set the remainder of
the :c:type:`v4l2_format` structure to 0.
.. _v4l2-meta-format:
.. c:type:: v4l2_meta_format
.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|


@ -132,6 +132,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
- ``sdr``
- Definition of a data format, see :ref:`pixfmt`, used by SDR
capture and output devices.
* -
- struct :c:type:`v4l2_meta_format`
- ``meta``
- Definition of a metadata format, see :ref:`meta-formats`, used by
metadata capture devices.
* -
- __u8
- ``raw_data``\ [200]


@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@ -5534,6 +5535,7 @@ F: net/bridge/
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-mdio
@ -6305,6 +6307,7 @@ F: tools/testing/selftests/gpio/
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
@ -7442,6 +7445,20 @@ S: Maintained
F: Documentation/fb/intelfb.txt
F: drivers/video/fbdev/intelfb/
+INTEL GPIO DRIVERS
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F: drivers/gpio/gpio-ich.c
+F: drivers/gpio/gpio-intel-mid.c
+F: drivers/gpio/gpio-lynxpoint.c
+F: drivers/gpio/gpio-merrifield.c
+F: drivers/gpio/gpio-ml-ioh.c
+F: drivers/gpio/gpio-pch.c
+F: drivers/gpio/gpio-sch.c
+F: drivers/gpio/gpio-sodaville.c
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
@ -7452,12 +7469,6 @@ T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
-INTEL PMIC GPIO DRIVER
-R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S: Maintained
-F: drivers/gpio/gpio-*cove.c
-F: drivers/gpio/gpio-msic.c
INTEL HID EVENT DRIVER
M: Alex Hung <alex.hung@canonical.com>
L: platform-driver-x86@vger.kernel.org
@ -7545,12 +7556,6 @@ W: https://01.org/linux-acpi
S: Supported
F: drivers/platform/x86/intel_menlow.c
-INTEL MERRIFIELD GPIO DRIVER
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-L: linux-gpio@vger.kernel.org
-S: Maintained
-F: drivers/gpio/gpio-merrifield.c
INTEL MIC DRIVERS (mic)
M: Sudeep Dutt <sudeep.dutt@intel.com>
M: Ashutosh Dixit <ashutosh.dixit@intel.com>
@ -7583,6 +7588,13 @@ F: drivers/platform/x86/intel_punit_ipc.c
F: arch/x86/include/asm/intel_pmc_ipc.h
F: arch/x86/include/asm/intel_punit_ipc.h
+INTEL PMIC GPIO DRIVERS
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F: drivers/gpio/gpio-*cove.c
+F: drivers/gpio/gpio-msic.c
INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
@ -14086,6 +14098,7 @@ F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Sasha Levin <sashal@kernel.org>
L: stable@vger.kernel.org
S: Supported
F: Documentation/process/stable-kernel-rules.rst


@ -468,7 +468,7 @@
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
#error "Inconsistent SCTLR_EL2 set/clear bits"
#endif
@ -509,7 +509,7 @@
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
#error "Inconsistent SCTLR_EL1 set/clear bits"
#endif


@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "CRC32 instructions",
.capability = ARM64_HAS_CRC32,
@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
.min_field_value = 1,
},
+#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypassing Safe (SSBS)",
.capability = ARM64_SSBS,


@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_STAGING=y
CONFIG_OCTEON_ETHERNET=y
CONFIG_OCTEON_USB=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_RAS=y
CONFIG_EXT4_FS=y


@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
/* call board setup routine */
plat_mem_setup();
+memblock_set_bottom_up(true);
/*
* Make sure all kernel memory is in the maps. The "UP" and


@ -2260,10 +2260,8 @@ void __init trap_init(void)
unsigned long size = 0x200 + VECTORSPACING*64;
phys_addr_t ebase_pa;
-memblock_set_bottom_up(true);
ebase = (unsigned long)
memblock_alloc_from(size, 1 << fls(size), 0);
-memblock_set_bottom_up(false);
/*
* Try to ensure ebase resides in KSeg0 if possible.
@ -2307,6 +2305,7 @@ void __init trap_init(void)
if (board_ebase_setup)
board_ebase_setup();
per_cpu_trap_init(true);
+memblock_set_bottom_up(false);
/*
* Copy the generic exception handlers to their final destination.


@ -231,6 +231,8 @@ static __init void prom_meminit(void)
cpumask_clear(&__node_data[(node)]->cpumask);
}
}
+max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
node = cpu / loongson_sysconf.cores_per_node;
if (node >= num_online_nodes())
@ -248,19 +250,9 @@ static __init void prom_meminit(void)
void __init paging_init(void)
{
-unsigned node;
unsigned long zones_size[MAX_NR_ZONES] = {0, };
pagetable_init();
-for_each_online_node(node) {
-unsigned long start_pfn, end_pfn;
-get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-if (end_pfn > max_low_pfn)
-max_low_pfn = end_pfn;
-}
#ifdef CONFIG_ZONE_DMA32
zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
#endif


@ -435,6 +435,7 @@ void __init prom_meminit(void)
mlreset();
szmem();
+max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
for (node = 0; node < MAX_COMPACT_NODES; node++) {
if (node_online(node)) {
@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
-unsigned node;
pagetable_init();
-for_each_online_node(node) {
-unsigned long start_pfn, end_pfn;
-get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-if (end_pfn > max_low_pfn)
-max_low_pfn = end_pfn;
-}
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(zones_size);
}


@ -71,6 +71,10 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
# Default target when executing plain make
boot := arch/riscv/boot
KBUILD_IMAGE := $(boot)/Image.gz
head-y := arch/riscv/kernel/head.o
core-y += arch/riscv/kernel/ arch/riscv/mm/
@ -81,4 +85,13 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
-all: vmlinux
+all: Image.gz
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zinstall install:
$(Q)$(MAKE) $(build)=$(boot) $@

arch/riscv/boot/.gitignore (new file)

@ -0,0 +1,2 @@
Image
Image.gz

arch/riscv/boot/Makefile (new file)

@ -0,0 +1,33 @@
#
# arch/riscv/boot/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2018, Anup Patel.
# Author: Anup Patel <anup@brainfault.org>
#
# Based on the ia64 and arm64 boot/Makefile.
#
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/Image.gz: $(obj)/Image FORCE
$(call if_changed,gzip)
install:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
zinstall:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image.gz System.map "$(INSTALL_PATH)"


@ -0,0 +1,60 @@
#!/bin/sh
#
# arch/riscv/boot/install.sh
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995 by Linus Torvalds
#
# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
# Adapted from code in arch/i386/boot/install.sh by Russell King
#
# "make install" script for the RISC-V Linux port
#
# Arguments:
# $1 - kernel version
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
#
verify () {
if [ ! -f "$1" ]; then
echo "" 1>&2
echo " *** Missing file: $1" 1>&2
echo ' *** You need to run "make" before "make install".' 1>&2
echo "" 1>&2
exit 1
fi
}
# Make sure the files actually exist
verify "$2"
verify "$3"
# User may have a custom install script
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
if [ "$(basename $2)" = "Image.gz" ]; then
# Compressed install
echo "Installing compressed kernel"
base=vmlinuz
else
# Normal install
echo "Installing normal kernel"
base=vmlinux
fi
if [ -f $4/$base-$1 ]; then
mv $4/$base-$1 $4/$base-$1.old
fi
cat $2 > $4/$base-$1
# Install system map file
if [ -f $4/System.map-$1 ]; then
mv $4/System.map-$1 $4/System.map-$1.old
fi
cp $3 $4/System.map-$1


@ -8,6 +8,7 @@
#define MODULE_ARCH_VERMAGIC "riscv"
struct module;
u64 module_emit_got_entry(struct module *mod, u64 val);
u64 module_emit_plt_entry(struct module *mod, u64 val);


@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
-return __asm_copy_to_user(to, from, n);
+return __asm_copy_from_user(to, from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
-return __asm_copy_from_user(to, from, n);
+return __asm_copy_to_user(to, from, n);
}
extern long strncpy_from_user(char *dest, const char __user *src, long count);
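The two wrappers above had their backends cross-wired: raw_copy_from_user() called __asm_copy_to_user() and vice versa. A minimal user-space sketch of this bug class (all names illustrative, memcpy standing in for the asm routines, which in the kernel differ in their fault-fixup behaviour):

	#include <stdio.h>
	#include <string.h>

	/* Stand-ins for the direction-specific asm backends. */
	static unsigned long backend_copy_from(void *to, const void *from, unsigned long n)
	{
		memcpy(to, from, n);	/* models __asm_copy_from_user() */
		return 0;		/* 0 == all bytes copied */
	}

	static unsigned long backend_copy_to(void *to, const void *from, unsigned long n)
	{
		memcpy(to, from, n);	/* models __asm_copy_to_user() */
		return 0;
	}

	/* Correct wiring, mirroring the fix: each wrapper calls its own
	 * backend. The swapped version compiles just as happily because
	 * the signatures match, which is what made the bug easy to miss. */
	static unsigned long raw_copy_from(void *to, const void *from, unsigned long n)
	{
		return backend_copy_from(to, from, n);
	}

	static unsigned long raw_copy_to(void *to, const void *from, unsigned long n)
	{
		return backend_copy_to(to, from, n);
	}

	int main(void)
	{
		char src[] = "hello", dst[8] = { 0 };

		raw_copy_from(dst, src, sizeof(src));
		raw_copy_to(dst, src, sizeof(src));
		printf("%s\n", dst);	/* "hello" */
		return 0;
	}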


@ -13,10 +13,9 @@
/*
* There is explicitly no include guard here because this file is expected to
-* be included multiple times. See uapi/asm/syscalls.h for more info.
+* be included multiple times.
*/
-#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
+#include <uapi/asm/unistd.h>
-#include <uapi/asm/syscalls.h>


@ -1,13 +1,25 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2017-2018 SiFive
* Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* There is explicitly no include guard here because this file is expected to
* be included multiple times in order to define the syscall macros via
* __SYSCALL.
*/
#ifdef __LP64__
#define __ARCH_WANT_NEW_STAT
#endif /* __LP64__ */
#include <asm-generic/unistd.h>
/*
* Allows the instruction cache to be flushed from userspace. Despite RISC-V


@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
static void print_isa(struct seq_file *f, const char *orig_isa)
{
-static const char *ext = "mafdc";
+static const char *ext = "mafdcsu";
const char *isa = orig_isa;
const char *e;
@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
/*
* Check the rest of the ISA string for valid extensions, printing those
* we find. RISC-V ISA strings define an order, so we only print the
-* extension bits when they're in order.
+* extension bits when they're in order. Hide the supervisor (S)
+* extension from userspace as it's not accessible from there.
*/
for (e = ext; *e != '\0'; ++e) {
if (isa[0] == e[0]) {
-seq_write(f, isa, 1);
+if (isa[0] != 's')
+seq_write(f, isa, 1);
isa++;
}
}
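A user-space rendering of the loop above, assuming the input is the extension part of the ISA string (everything after the "rv64i"/"rv32i" prefix); it mirrors the fix by walking the canonical order and suppressing 's':

	#include <stdio.h>

	static void print_isa_ext(const char *isa)
	{
		static const char *ext = "mafdcsu";	/* canonical order */
		const char *e;

		for (e = ext; *e != '\0'; ++e) {
			if (isa[0] == e[0]) {
				if (isa[0] != 's')	/* hide the S extension */
					putchar(isa[0]);
				isa++;
			}
		}
		putchar('\n');
	}

	int main(void)
	{
		print_isa_ext("mafdcsu");	/* prints "mafdcu" */
		return 0;
	}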


@ -44,6 +44,16 @@ ENTRY(_start)
amoadd.w a3, a2, (a3)
bnez a3, .Lsecondary_start
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
ble a4, a3, clear_bss_done
clear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
blt a3, a4, clear_bss
clear_bss_done:
/* Save hart ID and DTB physical address */
mv s0, a0
mv s1, a1
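A rough C analogue of the added assembly, using a local array in place of the linker-provided __bss_start/__bss_stop symbols so the sketch stays self-contained:

	#include <stdio.h>

	static unsigned long fake_bss[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	int main(void)
	{
		unsigned long *p = fake_bss;		/* __bss_start */
		unsigned long *end = fake_bss + 8;	/* __bss_stop  */

		/* REG_S zero,(a3); add a3,a3,RISCV_SZPTR; blt a3,a4,... */
		while (p < end)
			*p++ = 0;

		printf("%lu\n", fake_bss[0]);	/* 0 */
		return 0;
	}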


@ -74,7 +74,7 @@ SECTIONS
*(.sbss*)
}
-BSS_SECTION(0, 0, 0)
+BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
EXCEPTION_TABLE(0x10)
NOTES


@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"PNP0200", 0}, /* AT DMA Controller */
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
{"SMB0001", 0}, /* ACPI SMBUS virtual device */
{"", 0},
};


@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
static const struct of_device_id *ti_cpufreq_match_node(void)
{
struct device_node *np;
const struct of_device_id *match;
np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
of_node_put(np);
return match;
}
static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
-struct device_node *np;
const struct of_device_id *match;
struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
const char * const reg_names[] = {"vdd", "vbb"};
int ret;
-np = of_find_node_by_path("/");
-match = of_match_node(ti_cpufreq_of_match, np);
-of_node_put(np);
+match = dev_get_platdata(&pdev->dev);
if (!match)
return -ENODEV;
@ -290,7 +299,14 @@ fail_put_node:
static int ti_cpufreq_init(void)
{
-platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+const struct of_device_id *match;
+/* Check to ensure we are on a compatible platform */
+match = ti_cpufreq_match_node();
+if (match)
+platform_device_register_data(NULL, "ti-cpufreq", -1, match,
+sizeof(*match));
return 0;
}
module_init(ti_cpufreq_init);


@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
exp_info.ops = &udmabuf_ops;
exp_info.size = ubuf->pagecount << PAGE_SHIFT;
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
buf = dma_buf_export(&exp_info);
if (IS_ERR(buf)) {


@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
int ret;
/* write is only buffered synchronously */
-ret = serdev_device_write(serdev, buf, count, 0);
+ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;


@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/wait.h>
@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
int ret;
/* write is only buffered synchronously */
-ret = serdev_device_write(serdev, buf, count, 0);
+ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;


@ -35,8 +35,8 @@
#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
enum {
-GPIO_MOCKUP_DIR_OUT = 0,
-GPIO_MOCKUP_DIR_IN = 1,
+GPIO_MOCKUP_DIR_IN = 0,
+GPIO_MOCKUP_DIR_OUT = 1,
};
/*
@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
-return chip->lines[offset].dir;
+return !chip->lines[offset].dir;
}
static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
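For reference, gpiolib's .get_direction callback reports 1 for input and 0 for output; with the re-ordered enum (IN = 0, OUT = 1) the driver can satisfy that with a logical NOT, as the hunk above does. A compact model:

	#include <stdio.h>

	enum { DIR_IN = 0, DIR_OUT = 1 };	/* driver-internal encoding */

	static int get_direction(int internal_dir)
	{
		return !internal_dir;	/* 1 = input, 0 = output, per gpiolib */
	}

	int main(void)
	{
		printf("IN  -> %d\n", get_direction(DIR_IN));	/* 1 */
		printf("OUT -> %d\n", get_direction(DIR_OUT));	/* 0 */
		return 0;
	}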


@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (pxa_gpio_has_pinctrl()) {
ret = pinctrl_gpio_direction_input(chip->base + offset);
-if (!ret)
-return 0;
+if (ret)
+return ret;
}
spin_lock_irqsave(&gpio_lock, flags);


@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
status = -ENOMEM;
-goto err_free_gdev;
+goto err_free_ida;
}
if (chip->ngpio == 0) {
@ -1427,8 +1427,9 @@ err_free_label:
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
-err_free_gdev:
+err_free_ida:
ida_simple_remove(&gpio_ida, gdev->id);
+err_free_gdev:
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
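The relabeling above restores the usual goto-unwind rule: release resources in reverse order of acquisition, with each label undoing exactly the steps that succeeded before the failure. An illustrative user-space sketch (names are not gpiolib's):

	#include <stdio.h>
	#include <stdlib.h>

	static int setup(void)
	{
		int *id = malloc(sizeof(*id));	/* step 1: get an ID */
		int *descs;

		if (!id)
			return -1;

		descs = malloc(64);		/* step 2: allocate descriptors */
		if (!descs)
			goto err_free_id;	/* undo step 1 only */

		/* ... later failures would "goto err_free_descs" ... */
		free(descs);
		free(id);
		return 0;

	err_free_id:
		free(id);
		return -1;
	}

	int main(void)
	{
		return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
	}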


@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-amdgpu_dpm_switch_power_profile(adev,
-PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+if (adev->powerplay.pp_funcs &&
+adev->powerplay.pp_funcs->switch_power_profile)
+amdgpu_dpm_switch_power_profile(adev,
+PP_SMC_POWER_PROFILE_COMPUTE,
+!idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
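A sketch of the pattern the fix applies, treating entries in an ops table as optional and probing them before the call (structure names here are illustrative, not amdgpu's):

	#include <stdio.h>

	struct pp_ops {
		void (*switch_power_profile)(int profile, int enable);
	};

	static void try_switch(const struct pp_ops *ops, int profile, int enable)
	{
		if (ops && ops->switch_power_profile)	/* callback may be absent */
			ops->switch_power_profile(profile, enable);
	}

	int main(void)
	{
		struct pp_ops none = { 0 };

		try_switch(&none, 1, 1);	/* safely does nothing */
		printf("no NULL deref\n");
		return 0;
	}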


@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
if (amdgpu_device_has_dc_support(adev)) {
adev->mode_info.max_bpc_property =
drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
if (!adev->mode_info.max_bpc_property)
return -ENOMEM;
}
return 0;
}


@ -339,6 +339,8 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
/* maximum number of bits per channel for monitor color */
struct drm_property *max_bpc_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;


@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000


@ -65,6 +65,13 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL 0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
/*
* Indirect registers accessor
*/
@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
{
uint32_t def, data;
-def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+if (adev->asic_type == CHIP_VEGA20) {
+def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
-if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-else
-data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+else
+data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
-if (def != data)
-WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+if (def != data)
+WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+} else {
+def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+else
+data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+if (def != data)
+WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+}
}
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)


@ -2358,8 +2358,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector)
{
struct dm_connector_state *dm_conn_state =
to_dm_connector_state(connector->state);
uint32_t bpc = connector->display_info.bpc;
/* TODO: Remove this when there's support for max_bpc in drm */
if (dm_conn_state && bpc > dm_conn_state->max_bpc)
/* Round down to nearest even number. */
bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
switch (bpc) {
case 0:
/*
@ -2943,6 +2950,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
} else if (property == adev->mode_info.max_bpc_property) {
dm_new_state->max_bpc = val;
ret = 0;
}
return ret;
@ -2985,6 +2995,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
} else if (property == adev->mode_info.max_bpc_property) {
*val = dm_state->max_bpc;
ret = 0;
}
return ret;
}
@ -3795,6 +3808,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.max_bpc_property,
0);
}


@ -204,6 +204,7 @@ struct dm_connector_state {
enum amdgpu_rmx_type scaling;
uint8_t underscan_vborder;
uint8_t underscan_hborder;
uint8_t max_bpc;
bool underscan_enable;
bool freesync_enable;
bool freesync_capable;


@ -4525,12 +4525,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
struct smu7_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.sclk_table);
-int value;
+int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+int golden_value = golden_sclk_table->dpm_levels
+[golden_sclk_table->count - 1].value;
-value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-100 /
-golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@ -4567,12 +4567,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
struct smu7_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mclk_table);
-int value;
+int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+int golden_value = golden_mclk_table->dpm_levels
+[golden_mclk_table->count - 1].value;
-value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-100 /
-golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
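This refactor (repeated for the mclk path and in the vega10/12/20 hwmgrs below) computes the overdrive percentage as DIV_ROUND_UP((current - golden) * 100, golden) instead of open-coding the division. A self-contained check of the arithmetic, using the kernel's DIV_ROUND_UP definition and made-up clock values:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		int golden = 1000;	/* stock max clock (illustrative) */
		int cur = 1150;		/* overclocked max */
		int value = cur - golden;

		value = DIV_ROUND_UP(value * 100, golden);
		printf("%d%%\n", value);	/* 15% overdrive */
		return 0;
	}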


@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
-int value;
-value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-golden_sclk_table->dpm_levels
-[golden_sclk_table->count - 1].value) *
-100 /
-golden_sclk_table->dpm_levels
+int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+int golden_value = golden_sclk_table->dpm_levels
+[golden_sclk_table->count - 1].value;
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
-int value;
-value = (mclk_table->dpm_levels
-[mclk_table->count - 1].value -
-golden_mclk_table->dpm_levels
-[golden_mclk_table->count - 1].value) *
-100 /
-golden_mclk_table->dpm_levels
+int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+int golden_value = golden_mclk_table->dpm_levels
+[golden_mclk_table->count - 1].value;
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}


@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega12_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
-int value;
+int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+int golden_value = golden_sclk_table->dpm_levels
+[golden_sclk_table->count - 1].value;
-value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-100 /
-golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega12_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
-int value;
-value = (mclk_table->dpm_levels
-[mclk_table->count - 1].value -
-golden_mclk_table->dpm_levels
-[golden_mclk_table->count - 1].value) *
-100 /
-golden_mclk_table->dpm_levels
+int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+int golden_value = golden_mclk_table->dpm_levels
+[golden_mclk_table->count - 1].value;
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}


@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
data->registry_data.disallowed_features = 0x0;
/*
* Disable the following features for now:
* GFXCLK DS
* SOCLK DS
* LCLK DS
* DCEFCLK DS
* FCLK DS
* MP1CLK DS
* MP0CLK DS
*/
data->registry_data.disallowed_features = 0xE0041C00;
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@ -1313,12 +1323,13 @@ static int vega20_get_sclk_od(
&(data->dpm_table.gfx_table);
struct vega20_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
-int value;
+int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+int golden_value = golden_sclk_table->dpm_levels
+[golden_sclk_table->count - 1].value;
-/* od percentage */
-value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
-golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
-golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
@ -1358,12 +1369,13 @@ static int vega20_get_mclk_od(
&(data->dpm_table.mem_table);
struct vega20_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
-int value;
+int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+int golden_value = golden_mclk_table->dpm_levels
+[golden_mclk_table->count - 1].value;
-/* od percentage */
-value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
-golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
-golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
+value -= golden_value;
+value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}


@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
if (!ap)
return;
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
kfree(ap);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
ast_kick_out_firmware_fb(pdev);
return drm_get_pci_dev(pdev, ent, &driver);
}


@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
}
ast_bo_unreserve(bo);
ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
ast_show_cursor(crtc);
return 0;
}


@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
mutex_lock(&fb_helper->lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
if (ret)
goto fail;


@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
-len = 3;
+len = 6;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
/* And again for good measure (blb/pnv) */
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
}
goto out;


@ -3413,6 +3413,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
/* Prevent recursively calling stop_machine() and deadlocks. */
dev_info(dev_priv->drm.dev,
"Disabling error capture for VT-d workaround\n");
i915_disable_error_state(dev_priv, -ENODEV);
}
ggtt->invalidate = gen6_ggtt_invalidate;


@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
return 0;
}
if (IS_ERR(error))
return PTR_ERR(error);
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
@ -1859,6 +1862,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
i915_disable_error_state(i915, -ENOMEM);
return;
}
@ -1914,5 +1918,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
-i915_gpu_state_put(error);
+if (!IS_ERR(error))
+i915_gpu_state_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
spin_lock_irq(&i915->gpu_error.lock);
if (!i915->gpu_error.first_error)
i915->gpu_error.first_error = ERR_PTR(err);
spin_unlock_irq(&i915->gpu_error.lock);
}
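i915_disable_error_state() parks an ERR_PTR() sentinel in first_error so readers can distinguish "capture disabled" from "no error recorded". A minimal user-space model of the ERR_PTR/IS_ERR encoding (the kernel keeps these helpers in include/linux/err.h):

	#include <stdio.h>

	#define MAX_ERRNO	4095

	static void *ERR_PTR(long err) { return (void *)err; }
	static long PTR_ERR(void *ptr) { return (long)ptr; }
	static int IS_ERR(void *ptr)
	{
		/* small negative values map to the top of the address space */
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *first_error = ERR_PTR(-19);	/* -ENODEV: disabled */

		if (IS_ERR(first_error))
			printf("error capture disabled: %ld\n",
			       PTR_ERR(first_error));
		return 0;
	}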


@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
-return NULL;
+return ERR_PTR(-ENODEV);
}
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
static inline void i915_disable_error_state(struct drm_i915_private *i915,
int err)
{
}
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */


@ -2890,6 +2890,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
intel_state->base.rotation = plane_config->rotation;
intel_fill_fb_ggtt_view(&intel_state->view, fb,
intel_state->base.rotation);
intel_state->color_plane[0].stride =
@ -7882,8 +7883,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
}
if (val & DISPPLANE_ROTATE_180)
plane_config->rotation = DRM_MODE_ROTATE_180;
}
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
val & DISPPLANE_MIRROR)
plane_config->rotation |= DRM_MODE_REFLECT_X;
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);
@ -8952,6 +8960,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
/*
* DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
* while i915 HW rotation is clockwise, thats why this swapping.
*/
switch (val & PLANE_CTL_ROTATE_MASK) {
case PLANE_CTL_ROTATE_0:
plane_config->rotation = DRM_MODE_ROTATE_0;
break;
case PLANE_CTL_ROTATE_90:
plane_config->rotation = DRM_MODE_ROTATE_270;
break;
case PLANE_CTL_ROTATE_180:
plane_config->rotation = DRM_MODE_ROTATE_180;
break;
case PLANE_CTL_ROTATE_270:
plane_config->rotation = DRM_MODE_ROTATE_90;
break;
}
if (INTEL_GEN(dev_priv) >= 10 &&
val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
@ -15267,6 +15298,14 @@ retry:
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
goto out;
/*
* FIXME hack to force a LUT update to avoid the
* plane update forcing the pipe gamma on without
* having a proper LUT loaded. Remove once we
* have readout for pipe gamma enable.
*/
crtc_state->color_mgmt_changed = true;
}
}


@ -547,6 +547,7 @@ struct intel_initial_plane_config {
unsigned int tiling;
int size;
u32 base;
u8 rotation;
};
#define SKL_MIN_SRC_W 8


@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
if (mem_value == 0)
return U32_MAX;
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
if (mem_value == 0)
return U32_MAX;
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
{
int cpp;
if (mem_value == 0)
return U32_MAX;
if (!intel_wm_plane_visible(cstate, pstate))
return 0;
@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
* LP3 usage can cause vblank interrupts to be lost.
* The DEIIR bit will go high but it looks like the CPU
* never gets interrupted.
*
* It's not clear whether other interrupt source could
* be affected or if this is somehow limited to vblank
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
if (dev_priv->wm.pri_latency[3] == 0 &&
dev_priv->wm.spr_latency[3] == 0 &&
dev_priv->wm.cur_latency[3] == 0)
return;
dev_priv->wm.pri_latency[3] = 0;
dev_priv->wm.spr_latency[3] = 0;
dev_priv->wm.cur_latency[3] = 0;
DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
-if (IS_GEN6(dev_priv))
+if (IS_GEN6(dev_priv)) {
snb_wm_latency_quirk(dev_priv);
+snb_wm_lp3_irq_quirk(dev_priv);
+}
}
static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)


@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
return 0;
}
/* We know for sure we don't want an async update here. Set
* state->legacy_cursor_update to false to prevent
* drm_atomic_helper_setup_commit() from auto-completing
* commit->flip_done.
*/
state->legacy_cursor_update = false;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;


@ -807,7 +807,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
-struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+struct vc4_plane_state *vc4_state, *new_vc4_state;
if (plane->state->fb != state->fb) {
vc4_plane_async_set_fb(plane, state->fb);
@ -828,7 +828,18 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = state->src_y;
/* Update the display list based on the new crtc_x/y. */
-vc4_plane_atomic_check(plane, plane->state);
+vc4_plane_atomic_check(plane, state);
new_vc4_state = to_vc4_plane_state(state);
vc4_state = to_vc4_plane_state(plane->state);
/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
vc4_state->dlist[vc4_state->pos0_offset] =
new_vc4_state->dlist[vc4_state->pos0_offset];
vc4_state->dlist[vc4_state->pos2_offset] =
new_vc4_state->dlist[vc4_state->pos2_offset];
vc4_state->dlist[vc4_state->ptr0_offset] =
new_vc4_state->dlist[vc4_state->ptr0_offset];
/* Note that we can't just call vc4_plane_write_dlist()
* because that would smash the context data that the HVS is


@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
/* fallthrough */
case KVP_OP_GET_IP_INFO:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE,
UTF16_LITTLE_ENDIAN,
@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy)
process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
break;
case KVP_OP_GET_IP_INFO:
-/* We only need to pass on message->kvp_hdr.operation. */
+/*
+* We only need to pass on the info of operation, adapter_id
+* and addr_family to the userland kvp daemon.
+*/
process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
break;
case KVP_OP_SET:
switch (in_msg->body.kvp_set.data.value_type) {
@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy)
}
break;
-case KVP_OP_GET:
-/*
-* The key is always a string - utf16 encoding.
-*/
message->body.kvp_set.data.key_size =
utf16s_to_utf8s(
(wchar_t *)in_msg->body.kvp_set.data.key,
@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
break;
+case KVP_OP_GET:
+message->body.kvp_get.data.key_size =
+utf16s_to_utf8s(
+(wchar_t *)in_msg->body.kvp_get.data.key,
+in_msg->body.kvp_get.data.key_size,
+UTF16_LITTLE_ENDIAN,
+message->body.kvp_get.data.key,
+HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+break;
case KVP_OP_DELETE:


@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
-entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+(BIT_ULL(52)-1)) & ~7ULL;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
&entry, sizeof(entry));
writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
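The fix programs the tail-pointer register from ga_log_tail rather than the log base, and writes the mask as BIT_ULL(52) - 1 (keep the low 52 address bits) combined with ~7ULL (8-byte alignment) instead of a hand-typed constant that was one nibble short. A quick check of the mask arithmetic with an arbitrary address:

	#include <stdio.h>

	#define BIT_ULL(n)	(1ULL << (n))

	int main(void)
	{
		unsigned long long addr = 0xABCD12345678ULL | 0x5;	/* misaligned */
		unsigned long long entry = (addr & (BIT_ULL(52) - 1)) & ~7ULL;

		/* low three bits cleared, address bits preserved */
		printf("%#llx -> %#llx\n", addr, entry);
		return 0;
	}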


@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu,
}
if (old_ce)
-iounmap(old_ce);
+memunmap(old_ce);
ret = 0;
if (devfn < 0x80)


@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("%s: Page request without PASID: %08llx %08llx\n",
iommu->name, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
-goto bad_req;
+goto no_pasid;
}
if (!svm || svm->pasid != req->pasid) {


@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
if (!domain->mmu)
return;
/*
* Disable the context. Flush the TLB as required when modifying the
* context registers.


@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
}
if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
-dprintk(1, "%s: transmit queue full\n", __func__);
+dprintk(2, "%s: transmit queue full\n", __func__);
return -EBUSY;
}
@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
{
struct cec_log_addrs *las = &adap->log_addrs;
struct cec_msg msg = { };
const unsigned int max_retries = 2;
unsigned int i;
int err;
if (cec_has_log_addr(adap, log_addr))
@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
/* Send poll message */
msg.len = 1;
msg.msg[0] = (log_addr << 4) | log_addr;
-err = cec_transmit_msg_fh(adap, &msg, NULL, true);
+for (i = 0; i < max_retries; i++) {
+err = cec_transmit_msg_fh(adap, &msg, NULL, true);
+/*
+* While trying to poll the physical address was reset
+* and the adapter was unconfigured, so bail out.
+*/
+if (!adap->is_configuring)
+return -EINTR;
+if (err)
+return err;
+/*
+* The message was aborted due to a disconnect or
+* unconfigure, just bail out.
+*/
+if (msg.tx_status & CEC_TX_STATUS_ABORTED)
+return -EINTR;
+if (msg.tx_status & CEC_TX_STATUS_OK)
+return 0;
+if (msg.tx_status & CEC_TX_STATUS_NACK)
+break;
+/*
+* Retry up to max_retries times if the message was neither
+* OKed or NACKed. This can happen due to e.g. a Lost
+* Arbitration condition.
+*/
+}
/*
-* While trying to poll the physical address was reset
-* and the adapter was unconfigured, so bail out.
+* If we are unable to get an OK or a NACK after max_retries attempts
+* (and note that each attempt already consists of four polls), then
+* we assume that something is really weird and that it is not a
+* good idea to try and claim this logical address.
*/
-if (!adap->is_configuring)
-return -EINTR;
-if (err)
-return err;
-if (msg.tx_status & CEC_TX_STATUS_OK)
+if (i == max_retries)
return 0;
/*


@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state)
ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint);
if (ret) {
dev_err(dev, "failed to parse endpoint\n");
-ret = ret;
goto put_node;
}


@ -1844,14 +1844,12 @@ fail_mutex_destroy:
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-unsigned int i;
-cio2_notifier_exit(cio2);
-cio2_fbpt_exit_dummy(cio2);
-for (i = 0; i < CIO2_QUEUES; i++)
-cio2_queue_exit(cio2, &cio2->queue[i]);
-v4l2_device_unregister(&cio2->v4l2_dev);
media_device_unregister(&cio2->media_dev);
+cio2_notifier_exit(cio2);
+cio2_queues_exit(cio2);
+cio2_fbpt_exit_dummy(cio2);
+v4l2_device_unregister(&cio2->v4l2_dev);
media_device_cleanup(&cio2->media_dev);
mutex_destroy(&cio2->lock);
}


@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
static void isp_unregister_entities(struct isp_device *isp)
{
+media_device_unregister(&isp->media_dev);
omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
omap3isp_stat_unregister_entities(&isp->isp_hist);
v4l2_device_unregister(&isp->v4l2_dev);
-media_device_unregister(&isp->media_dev);
media_device_cleanup(&isp->media_dev);
}


@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
#define MAX_WIDTH 4096U
#define MIN_WIDTH 640U
#define MAX_HEIGHT 2160U
-#define MIN_HEIGHT 480U
+#define MIN_HEIGHT 360U
#define dprintk(dev, fmt, arg...) \
v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)


@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
static const struct media_device_ops m2m_media_ops = {
.req_validate = vb2_request_validate,
-.req_queue = vb2_m2m_request_queue,
+.req_queue = v4l2_m2m_request_queue,
};
static int vim2m_probe(struct platform_device *pdev)


@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
return -EINVAL;
if (p_mpeg2_slice_params->pad ||
p_mpeg2_slice_params->picture.pad ||
p_mpeg2_slice_params->sequence.pad)
return -EINVAL;
return 0;
case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
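The added test rejects slice parameters whose padding fields are non-zero, which keeps those bits available for future extensions. A generic sketch of the reserved-field validation pattern (struct and values are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	struct params {
		uint32_t width;
		uint32_t height;
		uint32_t pad[2];	/* reserved, must be zero */
	};

	static int validate(const struct params *p)
	{
		if (p->pad[0] || p->pad[1])
			return -22;	/* -EINVAL */
		return 0;
	}

	int main(void)
	{
		struct params ok = { 640, 480, { 0, 0 } };
		struct params bad = { 640, 480, { 1, 0 } };

		printf("%d %d\n", validate(&ok), validate(&bad));	/* 0 -22 */
		return 0;
	}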


@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
struct v4l2_fh *fh = sev->fh;
unsigned int i;
lockdep_assert_held(&fh->subscribe_lock);
assert_spin_locked(&fh->vdev->fh_lock);
/* Remove any pending events for this subscription */
for (i = 0; i < sev->in_use; i++) {
list_del(&sev->events[sev_pos(sev, i)].list);
fh->navailable--;
}
list_del(&sev->list);
}
int v4l2_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub, unsigned elems,
const struct v4l2_subscribed_event_ops *ops)
@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+if (!found_ev)
+list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
/* Already listening */
kvfree(sev);
-goto out_unlock;
-}
-if (sev->ops && sev->ops->add) {
+} else if (sev->ops && sev->ops->add) {
ret = sev->ops->add(sev, elems);
if (ret) {
+spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+__v4l2_event_unsubscribe(sev);
+spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
kvfree(sev);
-goto out_unlock;
}
}
-spin_lock_irqsave(&fh->vdev->fh_lock, flags);
-list_add(&sev->list, &fh->subscribed);
-spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
-out_unlock:
mutex_unlock(&fh->subscribe_lock);
return ret;
@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
-int i;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
-if (sev != NULL) {
-/* Remove any pending events for this subscription */
-for (i = 0; i < sev->in_use; i++) {
-list_del(&sev->events[sev_pos(sev, i)].list);
-fh->navailable--;
-}
-list_del(&sev->list);
-}
+if (sev != NULL)
+__v4l2_event_unsubscribe(sev);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);


@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
-void vb2_m2m_request_queue(struct media_request *req)
+void v4l2_m2m_request_queue(struct media_request *req)
{
struct media_request_object *obj, *obj_safe;
struct v4l2_m2m_ctx *m2m_ctx = NULL;
@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req)
if (m2m_ctx)
v4l2_m2m_try_schedule(m2m_ctx);
}
-EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
/* Videobuf2 ioctl helpers */


@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
#endif
-static inline const struct atmel_ssc_platform_data * __init
+static inline const struct atmel_ssc_platform_data *
atmel_ssc_get_driver_data(struct platform_device *pdev)
{
if (pdev->dev.of_node) {


@ -27,6 +27,9 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include <linux/nospec.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
/* Currently, only dump by gid is implemented */
if (req.gid >= gru_max_gids)
return -EINVAL;
req.gid = array_index_nospec(req.gid, gru_max_gids);
gru = GID_TO_GRU(req.gid);
ubuf = req.buf;
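array_index_nospec() clamps an already bounds-checked index so the CPU cannot speculate past the check (Spectre v1). A user-space model of the check-then-clamp shape; the real helper is a branchless arch-specific mask, modeled here with a plain conditional:

	#include <stdio.h>

	static unsigned int index_nospec_model(unsigned int idx, unsigned int size)
	{
		return idx < size ? idx : 0;	/* stand-in for the branchless mask */
	}

	int main(void)
	{
		unsigned int table[4] = { 10, 20, 30, 40 };
		unsigned int gid = 2;		/* would come from userspace */

		if (gid >= 4)			/* architectural bounds check */
			return -1;
		gid = index_nospec_model(gid, 4);	/* clamp before indexing */
		printf("%u\n", table[gid]);
		return 0;
	}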


@ -12,6 +12,7 @@
* - JMicron (hardware and technical support)
*/
#include <linux/bitfield.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/highmem.h>
@ -462,6 +463,9 @@ struct intel_host {
u32 dsm_fns;
int drv_strength;
bool d3_retune;
bool rpm_retune_ok;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
};
static const guid_t intel_dsm_guid =
@ -791,6 +795,77 @@ cleanup:
return ret;
}
#ifdef CONFIG_PM
#define GLK_RX_CTRL1 0x834
#define GLK_TUN_VAL 0x840
#define GLK_PATH_PLL GENMASK(13, 8)
#define GLK_DLY GENMASK(6, 0)
/* Workaround firmware failing to restore the tuning value */
static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct intel_host *intel_host = sdhci_pci_priv(slot);
struct sdhci_host *host = slot->host;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
u32 dly;
if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
return;
glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
if (susp) {
intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
intel_host->glk_tun_val = glk_tun_val;
return;
}
if (!intel_host->glk_tun_val)
return;
if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
intel_host->rpm_retune_ok = true;
return;
}
dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
(intel_host->glk_tun_val << 1));
if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
return;
glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
intel_host->rpm_retune_ok = true;
chip->rpm_retune = true;
mmc_retune_needed(host->mmc);
pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
}
static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
{
if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
!chip->rpm_retune)
glk_rpm_retune_wa(chip, susp);
}
static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
{
glk_rpm_retune_chk(chip, true);
return sdhci_cqhci_runtime_suspend(chip);
}
static int glk_runtime_resume(struct sdhci_pci_chip *chip)
{
glk_rpm_retune_chk(chip, false);
return sdhci_cqhci_runtime_resume(chip);
}
#endif
#ifdef CONFIG_ACPI
static int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.resume = sdhci_cqhci_resume,
#endif
#ifdef CONFIG_PM
-.runtime_suspend = sdhci_cqhci_runtime_suspend,
-.runtime_resume = sdhci_cqhci_runtime_resume,
+.runtime_suspend = glk_runtime_suspend,
+.runtime_resume = glk_runtime_resume,
#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
device_init_wakeup(&pdev->dev, true);
if (slot->cd_idx >= 0) {
-ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
+ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0, NULL);
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
0, NULL);
if (ret == -EPROBE_DEFER)
goto remove;


@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
int ret;
nand_np = dev->of_node;
-nfc_np = of_find_compatible_node(dev->of_node, NULL,
-"atmel,sama5d3-nfc");
+nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
if (!nfc_np) {
dev_err(dev, "Could not find device node for sama5d3-nfc\n");
return -ENODEV;
@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
}
if (caps->legacy_of_bindings) {
+struct device_node *nfc_node;
u32 ale_offs = 21;
/*
* If we are parsing legacy DT props and the DT contains a
* valid NFC node, forward the request to the sama5 logic.
*/
-if (of_find_compatible_node(pdev->dev.of_node, NULL,
-"atmel,sama5d3-nfc"))
+nfc_node = of_get_compatible_child(pdev->dev.of_node,
+"atmel,sama5d3-nfc");
+if (nfc_node) {
caps = &atmel_sama5_nand_caps;
+of_node_put(nfc_node);
+}
/*
* Even if the compatible says we are dealing with an


@ -150,15 +150,15 @@
#define NAND_VERSION_MINOR_SHIFT 16
/* NAND OP_CMDs */
-#define PAGE_READ 0x2
-#define PAGE_READ_WITH_ECC 0x3
-#define PAGE_READ_WITH_ECC_SPARE 0x4
-#define PROGRAM_PAGE 0x6
-#define PAGE_PROGRAM_WITH_ECC 0x7
-#define PROGRAM_PAGE_SPARE 0x9
-#define BLOCK_ERASE 0xa
-#define FETCH_ID 0xb
-#define RESET_DEVICE 0xd
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
if (read) {
if (host->use_ecc)
-cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
else
-cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
} else {
-cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
}
if (host->use_ecc) {
@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
* in use. we configure the controller to perform a raw read of 512
* bytes to read onfi params
*/
nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_ADDR0, 0);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
nandc_set_reg(nandc, NAND_FLASH_CMD,
BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_ADDR0, page_addr);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0,
@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
if (column == -1)
return 0;
nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
nandc_set_reg(nandc, NAND_ADDR0, column);
nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);

View File

@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
ndelay(cqspi->wr_delay);
while (remaining > 0) {
size_t write_words, mod_bytes;
write_bytes = remaining > page_size ? page_size : remaining;
iowrite32_rep(cqspi->ahb_base, txbuf,
DIV_ROUND_UP(write_bytes, 4));
write_words = write_bytes / 4;
mod_bytes = write_bytes % 4;
/* Write 4 bytes at a time then single bytes. */
if (write_words) {
iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
txbuf += (write_words * 4);
}
if (mod_bytes) {
unsigned int temp = 0xFFFFFFFF;
memcpy(&temp, txbuf, mod_bytes);
iowrite32(temp, cqspi->ahb_base);
txbuf += mod_bytes;
}
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
goto failwr;
}
txbuf += write_bytes;
remaining -= write_bytes;
if (remaining > 0)
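
The loop above writes each chunk as whole 32-bit FIFO words plus one padded tail word, so the final access never reads past the caller's buffer. A minimal userspace sketch of the same split, assuming a made-up mmio_write32() in place of iowrite32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for iowrite32(); just logs the value it would post. */
static void mmio_write32(uint32_t val)
{
    printf("write32: 0x%08x\n", val);
}

static void fifo_write(const uint8_t *buf, size_t len)
{
    size_t words = len / 4;
    size_t tail = len % 4;
    uint32_t w;

    while (words--) {
        memcpy(&w, buf, 4);         /* avoids unaligned loads */
        mmio_write32(w);
        buf += 4;
    }
    if (tail) {
        w = 0xFFFFFFFF;             /* pad bytes stay 0xFF */
        memcpy(&w, buf, tail);
        mmio_write32(w);
    }
}

int main(void)
{
    const uint8_t data[] = { 1, 2, 3, 4, 5, 6 };

    fifo_write(data, sizeof(data)); /* one full word + 2-byte tail */
    return 0;
}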

View File

@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the serial flash memory
* @len: number of bytes to read
* @buf: buffer where the data is copied into
* @buf: buffer where the data is copied into (dma-safe memory)
*
* Return: 0 on success, -errno otherwise.
*/
@ -2521,6 +2521,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
return left->size - right->size;
}
/**
* spi_nor_sort_erase_mask() - sort erase mask
* @map: the erase map of the SPI NOR
* @erase_mask: the erase type mask to be sorted
*
* Replicate the sort done for the map's erase types in BFPT: sort the erase
* mask in ascending order with the smallest erase type size starting from
* BIT(0) in the sorted erase mask.
*
* Return: sorted erase mask.
*/
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
{
struct spi_nor_erase_type *erase_type = map->erase_type;
int i;
u8 sorted_erase_mask = 0;
if (!erase_mask)
return 0;
/* Replicate the sort done for the map's erase types. */
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
sorted_erase_mask |= BIT(i);
return sorted_erase_mask;
}
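
spi_nor_sort_erase_mask() remaps a mask keyed by the original BFPT bit positions (erase_type[i].idx) onto the sorted array order, so BIT(0) ends up meaning the smallest erase size. A toy standalone model of that remapping, with illustrative sizes rather than the driver's tables:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

struct erase_type {
    uint32_t size;  /* erase size in bytes, 0 = unused slot */
    uint8_t idx;    /* original bit position before sorting */
};

static uint8_t sort_erase_mask(const struct erase_type *et, int n,
                               uint8_t mask)
{
    uint8_t sorted = 0;
    int i;

    for (i = 0; i < n; i++)
        if (et[i].size && (mask & BIT(et[i].idx)))
            sorted |= BIT(i);
    return sorted;
}

int main(void)
{
    /* Sorted by size; idx records where each type sat originally. */
    const struct erase_type et[] = {
        { 4096, 2 }, { 32768, 0 }, { 65536, 1 }, { 0, 3 },
    };
    /* Mask in original positions: types 0 and 2 supported. */
    uint8_t mask = BIT(0) | BIT(2);

    /* 4 KiB (was idx 2) lands in BIT(0), 32 KiB in BIT(1) -> 0x3. */
    printf("sorted mask: 0x%x\n", sort_erase_mask(et, 4, mask));
    return 0;
}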
/**
* spi_nor_regions_sort_erase_types() - sort erase types in each region
* @map: the erase map of the SPI NOR
@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
struct spi_nor_erase_region *region = map->regions;
struct spi_nor_erase_type *erase_type = map->erase_type;
int i;
u8 region_erase_mask, sorted_erase_mask;
while (region) {
region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/* Replicate the sort done for the map's erase types. */
sorted_erase_mask = 0;
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (erase_type[i].size &&
region_erase_mask & BIT(erase_type[i].idx))
sorted_erase_mask |= BIT(i);
sorted_erase_mask = spi_nor_sort_erase_mask(map,
region_erase_mask);
/* Overwrite erase mask. */
region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
* spi_nor_get_map_in_use() - get the configuration map in use
* @nor: pointer to a 'struct spi_nor'
* @smpt: pointer to the sector map parameter table
* @smpt_len: sector map parameter table length
*
* Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
*/
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt)
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u8 smpt_len)
{
const u32 *ret = NULL;
u32 i, addr;
const u32 *ret;
u8 *buf;
u32 addr;
int err;
u8 i;
u8 addr_width, read_opcode, read_dummy;
u8 read_data_mask, data_byte, map_id;
u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
addr_width = nor->addr_width;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
map_id = 0;
i = 0;
/* Determine if there are any optional Detection Command Descriptors */
while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) {
for (i = 0; i < smpt_len; i += 2) {
if (smpt[i] & SMPT_DESC_TYPE_MAP)
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
err = spi_nor_read_raw(nor, addr, 1, &data_byte);
if (err)
err = spi_nor_read_raw(nor, addr, 1, buf);
if (err) {
ret = ERR_PTR(err);
goto out;
}
/*
* Build an index value that is used to select the Sector Map
* Configuration that is currently in use.
*/
map_id = map_id << 1 | !!(data_byte & read_data_mask);
i = i + 2;
map_id = map_id << 1 | !!(*buf & read_data_mask);
}
/* Find the matching configuration map */
while (SMPT_MAP_ID(smpt[i]) != map_id) {
/*
* If command descriptors are provided, they always precede map
* descriptors in the table. There is no need to start the iteration
* over the smpt array all over again.
*
* Find the matching configuration map.
*/
ret = ERR_PTR(-EINVAL);
while (i < smpt_len) {
if (SMPT_MAP_ID(smpt[i]) == map_id) {
ret = smpt + i;
break;
}
/*
* If there are no more configuration map descriptors and no
* configuration ID matched the configuration identifier, the
* sector address map is unknown.
*/
if (smpt[i] & SMPT_DESC_END)
goto out;
break;
/* increment the table index to the next map */
i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
}
ret = smpt + i;
/* fall through */
out:
kfree(buf);
nor->addr_width = addr_width;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
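
Two details worth noting above: the single-byte buffer is kmalloc'ed because spi_nor_read_raw() requires DMA-safe memory (an on-stack byte is not), and map_id is built one bit per detection command, each read contributing the bit selected by read_data_mask. A toy model of the map_id construction, with a faked probe_byte() standing in for the flash reads:

#include <stdint.h>
#include <stdio.h>

static uint8_t probe_byte(int cmd)
{
    /* Pretend the flash answered 0x08 to the first command and
     * 0x00 to the second; the mask below tests bit 3.
     */
    return cmd == 0 ? 0x08 : 0x00;
}

int main(void)
{
    uint8_t map_id = 0;
    uint8_t read_data_mask = 0x08;
    int cmd;

    for (cmd = 0; cmd < 2; cmd++)
        map_id = map_id << 1 | !!(probe_byte(cmd) & read_data_mask);

    printf("map_id = %u\n", map_id);    /* 1 << 1 | 0 = 2 */
    return 0;
}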
@ -2946,7 +3000,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
u64 offset;
u32 region_count;
int i, j;
u8 erase_type;
u8 erase_type, uniform_erase_type;
region_count = SMPT_MAP_REGION_COUNT(*smpt);
/*
@ -2959,7 +3013,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
return -ENOMEM;
map->regions = region;
map->uniform_erase_type = 0xff;
uniform_erase_type = 0xff;
offset = 0;
/* Populate regions. */
for (i = 0; i < region_count; i++) {
@ -2974,12 +3028,15 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
* Save the erase types that are supported in all regions and
* can erase the entire flash memory.
*/
map->uniform_erase_type &= erase_type;
uniform_erase_type &= erase_type;
offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
region[i].size;
}
map->uniform_erase_type = spi_nor_sort_erase_mask(map,
uniform_erase_type);
spi_nor_region_mark_end(&region[i - 1]);
return 0;
@ -3020,9 +3077,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
for (i = 0; i < smpt_header->length; i++)
smpt[i] = le32_to_cpu(smpt[i]);
sector_map = spi_nor_get_map_in_use(nor, smpt);
if (!sector_map) {
ret = -EINVAL;
sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
if (IS_ERR(sector_map)) {
ret = PTR_ERR(sector_map);
goto out;
}
@ -3125,7 +3182,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
if (err)
goto exit;
/* Parse other parameter headers. */
/* Parse optional parameter tables. */
for (i = 0; i < header.nph; i++) {
param_header = &param_headers[i];
@ -3138,8 +3195,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
break;
}
if (err)
goto exit;
if (err) {
dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
SFDP_PARAM_HEADER_ID(param_header));
/*
* Let's not drop all information we extracted so far
* if optional table parsers fail. In case of failure,
* each optional parser is responsible for rolling back to
* the previously known spi_nor data.
*/
err = 0;
}
}
exit:

View File

@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
dev_err(&adapter->pdev->dev, "Device reset failed\n");
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(adapter->ena_dev, false);
}
ena_destroy_all_io_queues(adapter);
@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
return 0;
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
* and device is up, ena_close already reset the device
* In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
* and device is up, ena_down() already reset the device.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@ -2694,8 +2697,8 @@ err_device_destroy:
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_mmio_reg_read_request_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@ -3452,6 +3455,8 @@ err_rss:
ena_com_rss_destroy(ena_dev);
err_free_msix:
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(ena_dev, false);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
/* If the device is running then we want to make sure the device will be
* reset to make sure no more events will be issued by the device.
*/
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
rtnl_lock();
ena_destroy_device(adapter, true);
rtnl_unlock();
unregister_netdev(netdev);
free_netdev(netdev);
ena_com_rss_destroy(ena_dev);

View File

@ -45,7 +45,7 @@
#define DRV_MODULE_VER_MAJOR 2
#define DRV_MODULE_VER_MINOR 0
#define DRV_MODULE_VER_SUBMINOR 1
#define DRV_MODULE_VER_SUBMINOR 2
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION

View File

@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
prop = of_get_property(nd, "tpe-link-test?", NULL);
if (!prop)
goto no_link_test;
goto node_put;
if (strcmp(prop, "true")) {
printk(KERN_NOTICE "SunLance: warning: overriding option "
@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
"to ecd@skynet.be\n");
auxio_set_lte(AUXIO_LTE_ON);
}
node_put:
of_node_put(nd);
no_link_test:
lp->auto_select = 1;
lp->tpe = 0;
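
The fix above makes the early-exit path drop the node reference it took instead of leaking it. The general shape of the pattern, with node_get()/node_put() as stand-ins for of_node_get()/of_node_put():

#include <stdio.h>

static int refcount;

static void node_get(void) { refcount++; }
static void node_put(void) { refcount--; }

static int probe(int have_prop)
{
    int ret = 0;

    node_get();         /* reference taken, must be dropped */

    if (!have_prop) {
        ret = -1;       /* early exit... */
        goto node_put;  /* ...still drops the reference */
    }

    /* use the node here */

node_put:
    node_put();
    return ret;
}

int main(void)
{
    probe(0);
    probe(1);
    printf("refcount after probes: %d\n", refcount); /* 0 */
    return 0;
}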

View File

@ -12434,6 +12434,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
bool reset_phy = false;
if ((ering->rx_pending > tp->rx_std_ring_mask) ||
(ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@ -12465,7 +12466,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_restart_hw(tp, false);
/* Reset PHY to avoid PHY lock up */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
@ -12499,6 +12506,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
{
struct tg3 *tp = netdev_priv(dev);
int err = 0;
bool reset_phy = false;
if (tp->link_config.autoneg == AUTONEG_ENABLE)
tg3_warn_mgmt_link_flap(tp);
@ -12568,7 +12576,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_restart_hw(tp, false);
/* Reset PHY to avoid PHY lock up */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}

View File

@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool if_up = netif_running(nic->netdev);
struct bpf_prog *old_prog;
bool bpf_attached = false;
int ret = 0;
/* For now just support only the usual MTU sized frames */
if (prog && (dev->mtu > 1500)) {
@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
if (nic->xdp_prog) {
/* Attach BPF program */
nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
if (!IS_ERR(nic->xdp_prog))
if (!IS_ERR(nic->xdp_prog)) {
bpf_attached = true;
} else {
ret = PTR_ERR(nic->xdp_prog);
nic->xdp_prog = NULL;
}
}
/* Calculate Tx queues needed for XDP and network stack */
@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
netif_trans_update(nic->netdev);
}
return 0;
return ret;
}
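
The error handling added above relies on the kernel's IS_ERR/PTR_ERR idiom, where a small negative errno is encoded in the top of the pointer range so one return value can carry either a valid pointer or an error code. A userspace model of the idiom (not the kernel headers, values illustrative):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)       { return (void *)err; }
static long PTR_ERR(const void *p)   { return (long)p; }
static int IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *get_prog(int fail)
{
    static int prog = 42;

    return fail ? ERR_PTR(-22 /* -EINVAL */) : &prog;
}

int main(void)
{
    void *p = get_prog(1);

    if (IS_ERR(p))
        printf("error: %ld\n", PTR_ERR(p)); /* -22 */
    else
        printf("prog ok\n");
    return 0;
}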
static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)

View File

@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
if (!sq->dmem.base)
return;
if (sq->tso_hdrs)
if (sq->tso_hdrs) {
dma_free_coherent(&nic->pdev->dev,
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
sq->tso_hdrs = NULL;
}
/* Free pending skbs in the queue */
smp_rmb();

View File

@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
u64_stats_update_begin(&port->tx_stats_syncp);
port->tx_frag_stats[nfrags]++;
u64_stats_update_end(&port->ir_stats_syncp);
u64_stats_update_end(&port->tx_stats_syncp);
}
}

View File

@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
struct net_device *netdev = dev_id;
struct ftmac100 *priv = netdev_priv(netdev);
if (likely(netif_running(netdev))) {
/* Disable interrupts for polling */
ftmac100_disable_all_int(priv);
/* Disable interrupts for polling */
ftmac100_disable_all_int(priv);
if (likely(netif_running(netdev)))
napi_schedule(&priv->napi);
}
return IRQ_HANDLED;
}

View File

@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
for (j = 0; j < rx_pool->size; j++) {
if (rx_pool->rx_buff[j].skb) {
dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
rx_pool->rx_buff[i].skb = NULL;
dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
rx_pool->rx_buff[j].skb = NULL;
}
}
@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
return 0;
}
mutex_lock(&adapter->reset_lock);
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
mutex_unlock(&adapter->reset_lock);
if (rc)
return rc;
}
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
mutex_unlock(&adapter->reset_lock);
return rc;
}
}
@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
}
@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
}
@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
int i, rc;
@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
ibmvnic_cleanup(netdev);
@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
} else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
adapter->map_id = 1;
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
adapter->req_tx_entries_per_subcrq !=
old_num_tx_slots) {
release_rx_pools(adapter);
release_tx_pools(adapter);
rc = init_rx_pools(netdev);
if (rc)
return rc;
rc = init_tx_pools(netdev);
release_napi(adapter);
release_vpd_data(adapter);
rc = init_resources(adapter);
if (rc)
return rc;
release_napi(adapter);
rc = init_napi(adapter);
if (rc)
return rc;
} else {
rc = reset_tx_pools(adapter);
if (rc)
@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
adapter->state = VNIC_PROBED;
return 0;
}
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
* has already been taken before initializing the reset
*/
if (!adapter->wait_for_reset) {
rtnl_lock();
rc = init_resources(adapter);
rtnl_unlock();
} else {
rc = init_resources(adapter);
}
rc = init_resources(adapter);
if (rc)
return rc;
@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
netdev = adapter->netdev;
mutex_lock(&adapter->reset_lock);
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
* has already been taken before initializing the reset
*/
if (!adapter->wait_for_reset) {
rtnl_lock();
we_lock_rtnl = true;
}
reset_state = adapter->state;
rwi = get_next_rwi(adapter);
@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
mutex_unlock(&adapter->reset_lock);
return;
}
adapter->resetting = false;
mutex_unlock(&adapter->reset_lock);
if (we_lock_rtnl)
rtnl_unlock();
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
mutex_init(&adapter->reset_lock);
mutex_init(&adapter->rwi_lock);
adapter->resetting = false;
@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
adapter->state = VNIC_REMOVING;
unregister_netdev(netdev);
mutex_lock(&adapter->reset_lock);
rtnl_lock();
unregister_netdevice(netdev);
release_resources(adapter);
release_sub_crqs(adapter, 1);
@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock);
rtnl_unlock();
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
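
The rtnl handling above follows a take-if-not-held pattern: the reset worker acquires the lock itself unless the wait_for_reset path already holds it, and a local flag records which case applied so only the worker's own acquisition is released. A sketch of the pattern with pthreads (rtnl_lock()/rtnl_unlock() are the kernel calls this stands in for):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void reset_work(bool caller_holds_lock)
{
    bool we_locked = false;

    if (!caller_holds_lock) {
        pthread_mutex_lock(&big_lock);
        we_locked = true;
    }

    /* ... perform the reset under the lock ... */

    if (we_locked)
        pthread_mutex_unlock(&big_lock);
}

int main(void)
{
    reset_work(false);  /* worker-initiated: lock taken here */
    return 0;
}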

View File

@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
struct mutex reset_lock, rwi_lock;
struct mutex rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;

View File

@ -569,6 +569,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
unsigned int hw_mtu;
struct net_dim dim; /* Dynamic Interrupt Moderation */

View File

@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
*speed = mlx5e_port_ptys2speed(eth_proto_oper);
if (!(*speed)) {
mlx5_core_warn(mdev, "cannot get port speed\n");
if (!(*speed))
err = -EINVAL;
}
return err;
}
@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
case 40000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_cap_10g_40g);
fec_override_admin_10g_40g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_10g_40g, *fec_policy);
@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
case 10000:
case 40000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_admin_10g_40g);
fec_override_cap_10g_40g);
break;
case 25000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
{
u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
bool fec_mode_not_supp_in_speed = false;
u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
u32 current_fec_speed;
u8 fec_policy_auto = 0;
u8 fec_caps = 0;
int err;
int i;
@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
if (err)
return err;
err = mlx5e_port_linkspeed(dev, &current_fec_speed);
if (err)
return err;
MLX5_SET(pplm_reg, out, local_port, 1);
memset(in, 0, sz);
MLX5_SET(pplm_reg, in, local_port, 1);
for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
/* policy supported for link speed */
if (!!(fec_caps & fec_policy)) {
mlx5e_fec_admin_field(in, &fec_policy, 1,
/* policy supported for link speed, or policy is auto */
if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
mlx5e_fec_admin_field(out, &fec_policy, 1,
fec_supported_speeds[i]);
} else {
if (fec_supported_speeds[i] == current_fec_speed)
return -EOPNOTSUPP;
mlx5e_fec_admin_field(in, &no_fec_policy, 1,
fec_supported_speeds[i]);
/* turn off FEC if supported. Else, leave it the same */
if (fec_caps & fec_policy_nofec)
mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
fec_supported_speeds[i]);
fec_mode_not_supp_in_speed = true;
}
}
@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
"FEC policy 0x%x is not supported for some speeds",
fec_policy);
return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}

View File

@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
if (err)
if (err) {
mlx5_core_warn(priv->mdev, "cannot get port speed\n");
return 0;
}
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
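
As a rough feel for the magnitude (values picked for illustration only): with cable_len = 100, speed = 25000 and mtu = 1500, the integer arithmetic gives xoff = (301 + 216 * 100 / 100) * 25000 / 1000 + 272 * 1500 / 100 = 517 * 25 + 4080 = 17005.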

View File

@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
err = get_fec_supported_advertised(mdev, link_ksettings);
if (err)
if (get_fec_supported_advertised(mdev, link_ksettings))
netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
__func__, err);

View File

@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
int err;
u32 i;
err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
if (err)
return err;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;
mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
if (err)
return err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
if (err)
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
c->irq_desc = irq_to_desc(irq);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
return 0;
}
#endif
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
#ifdef CONFIG_MLX5_ESWITCH
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
reset = reset && (ppw_old != ppw_new);
reset = reset && (is_linear || (ppw_old != ppw_new));
}
if (!reset) {
@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
@ -5004,11 +5019,21 @@ err_free_netdev:
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const struct mlx5e_profile *profile;
int max_nch;
int err;
profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_get_max_num_channels(priv->mdev);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
priv->channels.params.num_channels = max_nch;
mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, max_nch);
}
err = profile->init_tx(priv);
if (err)
goto out;

View File

@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u32 frag_size;
bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
rq->stats->oversize_pkts_sw_drop++;
return NULL;
}
va = page_address(di->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);

View File

@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
return 1;
}
#ifdef CONFIG_INET
/* loopback test */
#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
struct mlx5ehdr {
__be32 version;
__be64 magic;
char text[ETH_GSTRING_LEN];
};
#ifdef CONFIG_INET
/* loopback test */
#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
{
struct sk_buff *skb = NULL;
@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
struct ethhdr *ethh;
struct udphdr *udph;
struct iphdr *iph;
int datalen, iplen;
datalen = MLX5E_TEST_PKT_SIZE -
(sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
int iplen;
skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
if (!skb) {
@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
/* Fill UDP header */
udph->source = htons(9);
udph->dest = htons(9); /* Discard Protocol */
udph->len = htons(datalen + sizeof(struct udphdr));
udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
udph->check = 0;
/* Fill IP header */
@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
iph->ttl = 32;
iph->version = 4;
iph->protocol = IPPROTO_UDP;
iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
sizeof(struct mlx5ehdr);
iph->tot_len = htons(iplen);
iph->frag_off = 0;
iph->saddr = 0;
@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
datalen -= sizeof(*mlxh);
skb_put_zero(skb, datalen);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
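
For a feel of the resulting sizes (assumed common 64-bit layout, for illustration only): struct mlx5ehdr is now a 4-byte version plus an 8-byte magic, which alignment padding rounds up to 16 bytes, so udph->len becomes htons(8 + 16) = 24, iplen = 20 + 8 + 16 = 44, and MLX5E_TEST_PKT_SIZE = 14 + 20 + 8 + 16 = 58 bytes.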

View File

@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },

View File

@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
u64 rx_wqe_err;
u64 rx_mpwqe_filler_cqes;
u64 rx_mpwqe_filler_strides;
u64 rx_oversize_pkts_sw_drop;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
u64 wqe_err;
u64 mpwqe_filler_cqes;
u64 mpwqe_filler_strides;
u64 oversize_pkts_sw_drop;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;

View File

@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
FLOW_DISSECTOR_KEY_BASIC,
f->key);
struct flow_dissector_key_eth_addrs *mask =
struct flow_dissector_key_basic *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
FLOW_DISSECTOR_KEY_BASIC,
f->mask);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
ntohs(mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
ntohs(key->n_proto));
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
if (mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
} else {
} else if (*match_level != MLX5_MATCH_NONE) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
*match_level = MLX5_MATCH_L2;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->key);
struct flow_dissector_key_basic *mask =
struct flow_dissector_key_eth_addrs *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->mask);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
ntohs(mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
ntohs(key->n_proto));
if (mask->n_proto)
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
*match_level = MLX5_MATCH_L2;
}
@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
/* the HW doesn't need L3 inline to match on frag=no */
if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
*match_level = MLX5_INLINE_MODE_L2;
*match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
*match_level = MLX5_INLINE_MODE_IP;
*match_level = MLX5_MATCH_L3;
}
}
@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");

View File

@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
};
static const struct rhashtable_params rhash_sa = {
.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
/* Keep out "cmd" field from the key as it's
* value is not constant during the lifetime
* of the key object.
*/
.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
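
The adjusted key above is plain offset arithmetic: key_offset is advanced past the leading "cmd" field and key_len shrinks by the same amount, so the mutable field never reaches the hash or compare. A simplified standalone version of that arithmetic (the structs are stand-ins, not the mlx5 definitions):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct hw_sa {
    uint32_t cmd;               /* changes during the object's lifetime */
    uint8_t key_material[16];   /* stable part, safe to hash */
};

struct sa_ctx {
    struct hw_sa hw_sa;
    int hash;
};

int main(void)
{
    size_t key_len = sizeof(struct hw_sa) - sizeof(uint32_t);
    size_t key_offset = offsetof(struct sa_ctx, hw_sa) + sizeof(uint32_t);

    printf("key_len=%zu key_offset=%zu\n", key_len, key_offset);
    return 0;
}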

View File

@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
netif_carrier_off(epriv->netdev);
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
mlx5i_uninit_underlay_qp(epriv);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;

View File

@ -485,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
if (bitmap_weight((unsigned long *)&pq_flags,
sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
goto err;
}
if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
goto err;
}
switch (pq_flags) {
case PQ_FLAGS_RLS:
@ -510,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
}
err:
DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
return NULL;
return &qm_info->start_pq;
}
/* save pq index in qm info */
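
The bitmap_weight() fix above addresses a units bug: the function expects a size in bits, while sizeof() yields bytes, so the old call only inspected the first few bits of pq_flags. A userspace illustration with a popcount stand-in:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

static int weight(uint32_t v, unsigned int nbits)
{
    int w = 0;
    unsigned int i;

    for (i = 0; i < nbits && i < 32; i++)
        w += (v >> i) & 1;
    return w;
}

int main(void)
{
    uint32_t pq_flags = 0x30;   /* two flags, both above bit 3 */

    /* Buggy form: sizeof() == 4, so only bits 0-3 are counted. */
    printf("bytes as bits: %d\n", weight(pq_flags, sizeof(pq_flags)));
    /* Fixed form: all 32 bits examined, both flags seen. */
    printf("correct:       %d\n",
           weight(pq_flags, sizeof(pq_flags) * BITS_PER_BYTE));
    return 0;
}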
@ -535,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
{
u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
if (max_tc == 0) {
DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
PQ_FLAGS_MCOS);
return p_hwfn->qm_info.start_pq;
}
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
if (max_vf == 0) {
DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
PQ_FLAGS_VFS);
return p_hwfn->qm_info.start_pq;
}
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)

View File

@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev)
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc < 0)
goto out_unlock;
reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
reg_val);
out_unlock:
rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
return rc;

Some files were not shown because too many files have changed in this diff.