Merge 4.20-rc7 into usb-next

We need the USB changes in here for additional patches to be able to
apply cleanly.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Greg Kroah-Hartman, 2018-12-21 16:46:08 +01:00
Commit: cd6a22310e
308 changed files with 3437 additions and 1692 deletions


@ -2541,6 +2541,10 @@ S: Ormond
S: Victoria 3163
S: Australia
N: Eric Miao
E: eric.y.miao@gmail.com
D: MMP support
N: Pauline Middelink
E: middelin@polyware.nl
D: General low-level bug fixes, /proc fixes, identd support
@ -4115,6 +4119,10 @@ S: 1507 145th Place SE #B5
S: Bellevue, Washington 98007
S: USA
N: Haojian Zhuang
E: haojian.zhuang@gmail.com
D: MMP support
N: Richard Zidlicky
E: rz@linux-m68k.org, rdzidlic@geocities.com
W: http://www.geocities.com/rdzidlic


@ -187,6 +187,8 @@ Takes xa_lock internally:
* :c:func:`xa_erase_bh`
* :c:func:`xa_erase_irq`
* :c:func:`xa_cmpxchg`
* :c:func:`xa_cmpxchg_bh`
* :c:func:`xa_cmpxchg_irq`
* :c:func:`xa_store_range`
* :c:func:`xa_alloc`
* :c:func:`xa_alloc_bh`
@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
in the interrupt handler. Some of the more common patterns have helper
functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh`
and :c:func:`xa_cmpxchg_irq`.
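For example, a minimal sketch (the helper name is hypothetical) of the new
cmpxchg variants used from process context while an interrupt handler also
updates the same array; :c:func:`xa_cmpxchg_irq` takes the xa_lock with
interrupts disabled, so no explicit xa_lock_irq()/xa_unlock_irq() pair is
needed around the call::

    #include <linux/xarray.h>

    /* Replace the entry at @index only if it still holds @old. */
    static int replace_if_unchanged(struct xarray *xa, unsigned long index,
                                    void *old, void *new)
    {
            void *curr = xa_cmpxchg_irq(xa, index, old, new, GFP_KERNEL);

            if (curr != old)
                    return xa_is_err(curr) ? xa_err(curr) : -EBUSY;
            return 0;
    }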
Sometimes you need to protect access to the XArray with a mutex because
that lock sits above another mutex in the locking hierarchy. That does


@ -1505,6 +1505,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
configuring a stateless hardware decoding pipeline for MPEG-2.
The bitstream parameters are defined according to :ref:`mpeg2part2`.
.. note::
This compound control is not yet part of the public kernel API and
it is expected to change.
.. c:type:: v4l2_ctrl_mpeg2_slice_params
.. cssclass:: longtable
@ -1625,6 +1630,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
Specifies quantization matrices (as extracted from the bitstream) for the
associated MPEG-2 slice data.
.. note::
This compound control is not yet part of the public kernel API and
it is expected to change.
.. c:type:: v4l2_ctrl_mpeg2_quantization
.. cssclass:: longtable


@ -1739,13 +1739,17 @@ ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
W: https://mtk.bcnfs.org/
C: irc://chat.freenode.net/linux-mediatek
S: Maintained
F: arch/arm/boot/dts/mt6*
F: arch/arm/boot/dts/mt7*
F: arch/arm/boot/dts/mt8*
F: arch/arm/mach-mediatek/
F: arch/arm64/boot/dts/mediatek/
F: drivers/soc/mediatek/
N: mtk
N: mt[678]
K: mediatek
ARM/Mediatek USB3 PHY DRIVER
@ -4843,6 +4847,7 @@ F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
M: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm
B: https://bugs.freedesktop.org/
@ -8939,7 +8944,7 @@ F: arch/mips/boot/dts/img/pistachio_marduk.dts
MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M: Vivien Didelot <vivien.didelot@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mv88e6xxx/
@ -9444,6 +9449,13 @@ F: drivers/media/platform/mtk-vpu/
F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt
F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd@nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
MEDIATEK MT7601U WIRELESS LAN DRIVER
M: Jakub Kicinski <kubakici@wp.pl>
L: linux-wireless@vger.kernel.org
@ -10006,12 +10018,9 @@ S: Odd Fixes
F: drivers/media/radio/radio-miropcm20*
MMP SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
M: Haojian Zhuang <haojian.zhuang@gmail.com>
R: Lubomir Rintel <lkundrak@v3.sk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/hzhuang1/linux.git
T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
S: Maintained
S: Odd Fixes
F: arch/arm/boot/dts/mmp*
F: arch/arm/mach-mmp/
@ -10417,7 +10426,7 @@ F: drivers/net/wireless/
NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M: Vivien Didelot <vivien.didelot@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/


@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 20
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Shy Crocodile
# *DOCUMENTATION*


@ -634,6 +634,7 @@ setup_arch(char **cmdline_p)
/* Find our memory. */
setup_memory(kernel_end);
memblock_set_bottom_up(true);
/* First guess at cpu cache sizes. Do this before init_arch. */
determine_cpu_caches(cpu->type);


@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end)
if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
panic("kernel loaded out of ram");
memblock_add(PFN_PHYS(node_min_pfn),
(node_max_pfn - node_min_pfn) << PAGE_SHIFT);
/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
Note that we round this down, not up - node memory
has much larger alignment than 8Mb, so it's safe. */
node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
memblock_add(PFN_PHYS(node_min_pfn),
(node_max_pfn - node_min_pfn) << PAGE_SHIFT);
NODE_DATA(nid)->node_start_pfn = node_min_pfn;
NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;


@ -45,7 +45,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
vmmc: fixedregulator@0 {
vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@ -53,7 +53,7 @@
regulator-boot-on;
};
veth: fixedregulator@0 {
veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;


@ -145,7 +145,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
vmmc: fixedregulator@0 {
vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@ -153,7 +153,7 @@
regulator-boot-on;
};
veth: fixedregulator@0 {
veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;


@ -31,7 +31,7 @@
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
};
};


@ -26,7 +26,7 @@
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
};
};


@ -86,13 +86,17 @@
compatible = "regulator-fixed";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
clock-names = "slow";
regulator-name = "reg_wlan";
startup-delay-us = <70000>;
gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
usdhc2_pwrseq: usdhc2_pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
clock-names = "ext_clock";
};
};
&adc1 {
@ -375,6 +379,7 @@
bus-width = <4>;
non-removable;
vmmc-supply = <&reg_wlan>;
mmc-pwrseq = <&usdhc2_pwrseq>;
cap-power-off-card;
keep-power-in-suspend;
status = "okay";


@ -100,6 +100,19 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
usdhc2_pwrseq: usdhc2_pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
clock-names = "ext_clock";
};
};
&clks {
assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
<&clks IMX7D_CLKO2_ROOT_DIV>;
assigned-clock-parents = <&clks IMX7D_CKIL>;
assigned-clock-rates = <0>, <32768>;
};
&i2c4 {
@ -199,12 +212,13 @@
&usdhc2 { /* Wifi SDIO */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
no-1-8-v;
non-removable;
keep-power-in-suspend;
wakeup-source;
vmmc-supply = <&reg_ap6212>;
mmc-pwrseq = <&usdhc2_pwrseq>;
status = "okay";
};
@ -301,6 +315,12 @@
};
&iomuxc_lpsr {
pinctrl_wifi_clk: wificlkgrp {
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d
>;
};
pinctrl_wdog: wdoggrp {
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74


@ -314,8 +314,8 @@
&reg_dldo3 {
regulator-always-on;
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-name = "vcc-pd";
};


@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
imx_gpc_set_arm_power_up_timing(2, 1);
imx_gpc_set_arm_power_up_timing(0xf, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
return cpuidle_register(&imx6sx_cpuidle_driver, NULL);


@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
#define cpu_is_pxa910() (0)
#endif
#ifdef CONFIG_CPU_MMP2
#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
static inline int cpu_is_mmp2(void)
{
return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
(((mmp_chip_id & 0xfff) == 0x410) ||
((mmp_chip_id & 0xfff) == 0x610));
}
#else
#define cpu_is_mmp2() (0)


@ -20,28 +20,24 @@
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x000>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x001>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu2: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu3: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
};
};
};


@ -28,33 +28,6 @@
method = "smc";
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
idle_states {
entry_method = "arm,pcsi";
CPU_SLEEP_0: cpu-sleep-0 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x0010000>;
entry-latency-us = <80>;
exit-latency-us = <160>;
min-residency-us = <320>;
};
CLUSTER_SLEEP_0: cluster-sleep-0 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x1010000>;
entry-latency-us = <500>;
exit-latency-us = <1000>;
min-residency-us = <2500>;
};
};
};
ap806 {
#address-cells = <2>;
#size-cells = <2>;


@ -16,8 +16,13 @@
model = "Bananapi BPI-R64";
compatible = "bananapi,bpi-r64", "mediatek,mt7622";
aliases {
serial0 = &uart0;
};
chosen {
bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
stdout-path = "serial0:115200n8";
bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
};
cpus {


@ -17,8 +17,13 @@
model = "MediaTek MT7622 RFB1 board";
compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
aliases {
serial0 = &uart0;
};
chosen {
bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
stdout-path = "serial0:115200n8";
bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
};
cpus {


@ -227,16 +227,6 @@
#reset-cells = <1>;
};
timer: timer@10004000 {
compatible = "mediatek,mt7622-timer",
"mediatek,mt6577-timer";
reg = <0 0x10004000 0 0x80>;
interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
clocks = <&infracfg CLK_INFRA_APXGPT_PD>,
<&topckgen CLK_TOP_RTC>;
clock-names = "system-clk", "rtc-clk";
};
scpsys: scpsys@10006000 {
compatible = "mediatek,mt7622-scpsys",
"syscon";


@ -34,15 +34,6 @@
*/
#define PCI_IO_SIZE SZ_16M
/*
* Log2 of the upper bound of the size of a struct page. Used for sizing
* the vmemmap region only, does not affect actual memory footprint.
* We don't use sizeof(struct page) directly since taking its size here
* requires its definition to be available at this point in the inclusion
* chain, and it may not be a power of 2 in the first place.
*/
#define STRUCT_PAGE_MAX_SHIFT 6
/*
* VMEMMAP_SIZE - allows the whole linear region to be covered by
* a struct page array


@ -429,9 +429,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
prot,
__builtin_return_address(0));
if (addr) {
memset(addr, 0, size);
if (!coherent)
__dma_flush_area(page_to_virt(page), iosize);
memset(addr, 0, size);
} else {
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
dma_release_from_contiguous(dev, page,


@ -610,14 +610,6 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
* correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*


@ -197,7 +197,7 @@ $(obj)/empty.c:
$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
$(Q)cp $< $@
$(obj)/serial.c: $(obj)/autoconf.h
$(srctree)/$(src)/serial.c: $(obj)/autoconf.h
$(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
$(Q)cp $< $@


@ -15,7 +15,7 @@
RELA = 7
RELACOUNT = 0x6ffffff9
.text
.data
/* A procedure descriptor used when booting this as a COFF file.
* When making COFF, this comes first in the link and we're
* linked at 0x500000.
@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
.globl _zimage_start_opd
_zimage_start_opd:
.long 0x500000, 0, 0, 0
.text
b _zimage_start
#ifdef __powerpc64__
.balign 8


@ -26,6 +26,8 @@
#include <asm/ptrace.h>
#include <asm/reg.h>
#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
/*
* Overload regs->result to specify whether we should use the MSR (result
* is zero) or the SIAR (result is non zero).


@ -1,7 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
generic-y += param.h
generic-y += poll.h
generic-y += resource.h


@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
#include <asm/ptrace.h>
typedef struct user_pt_regs bpf_user_pt_regs_t;
#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */


@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
/* Now find out if one of these is out firmware console */
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (path == NULL)
path = of_get_property(of_chosen, "stdout-path", NULL);
if (path != NULL) {
stdout = of_find_node_by_path(path);
if (stdout)
@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
/* We are getting a weird phandle from OF ... */
/* ... So use the full path instead */
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (name == NULL)
name = of_get_property(of_chosen, "stdout-path", NULL);
if (name == NULL) {
DBG(" no linux,stdout-path !\n");
DBG(" no stdout-path !\n");
return -ENODEV;
}
prom_stdout = of_find_node_by_path(name);


@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
phb->controller_ops.teardown_msi_irqs(dev);
/*
* We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
* so check the pointer again.
*/
if (phb->controller_ops.teardown_msi_irqs)
phb->controller_ops.teardown_msi_irqs(dev);
}


@ -3266,12 +3266,17 @@ long do_syscall_trace_enter(struct pt_regs *regs)
user_exit();
if (test_thread_flag(TIF_SYSCALL_EMU)) {
ptrace_report_syscall(regs);
/*
* A nonzero return code from tracehook_report_syscall_entry()
* tells us to prevent the syscall execution, but we are not
* going to execute it anyway.
*
* Returning -1 will skip the syscall execution. We want to
* avoid clobbering any register also, thus, not 'gotoing'
* skip label.
*/
if (tracehook_report_syscall_entry(regs))
;
return -1;
}


@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>


@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
void *p;
void *p = NULL;
int rc;
if (vmemmap_populated(start, page_size))
continue;
/*
* Allocate from the altmap first if we have one. This may
* fail due to alignment issues when using 16MB hugepages, so
* fall back to system memory if the altmap allocation fail.
*/
if (altmap)
p = altmap_alloc_block_buf(page_size, altmap);
else
if (!p)
p = vmemmap_alloc_block_buf(page_size, node);
if (!p)
return -ENOMEM;
@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
unsigned long page_order = get_order(page_size);
unsigned long alt_start = ~0, alt_end = ~0;
unsigned long base_pfn;
start = _ALIGN_DOWN(start, page_size);
if (altmap) {
alt_start = altmap->base_pfn;
alt_end = altmap->base_pfn + altmap->reserve +
altmap->free + altmap->alloc + altmap->align;
}
pr_debug("vmemmap_free %lx...%lx\n", start, end);
@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
page = pfn_to_page(addr >> PAGE_SHIFT);
section_base = pfn_to_page(vmemmap_section_start(start));
nr_pages = 1 << page_order;
base_pfn = PHYS_PFN(addr);
if (altmap) {
if (base_pfn >= alt_start && base_pfn < alt_end) {
vmem_altmap_free(altmap, nr_pages);
} else if (PageReserved(page)) {
/* allocated from bootmem */


@ -140,8 +140,7 @@ config IBMEBUS
Bus device driver for GX bus based adapters.
config PAPR_SCM
depends on PPC_PSERIES && MEMORY_HOTPLUG
select LIBNVDIMM
depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
tristate "Support for the PAPR Storage Class Memory interface"
help
Enable access to hypervisor provided storage class memory.


@ -55,7 +55,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
do {
rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
p->blocks, BIND_ANY_ADDR, token);
token = be64_to_cpu(ret[0]);
token = ret[0];
cond_resched();
} while (rc == H_BUSY);
@ -64,7 +64,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
return -ENXIO;
}
p->bound_addr = be64_to_cpu(ret[1]);
p->bound_addr = ret[1];
dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
@ -82,7 +82,7 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
do {
rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
p->bound_addr, p->blocks, token);
token = be64_to_cpu(ret[0]);
token = ret[0];
cond_resched();
} while (rc == H_BUSY);
@ -223,6 +223,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
goto err;
}
if (nvdimm_bus_check_dimm_count(p->bus, 1))
goto err;
/* now add the region */
memset(&mapping, 0, sizeof(mapping));
@ -257,9 +260,12 @@ err: nvdimm_bus_unregister(p->bus);
static int papr_scm_probe(struct platform_device *pdev)
{
uint32_t drc_index, metadata_size, unit_cap[2];
struct device_node *dn = pdev->dev.of_node;
u32 drc_index, metadata_size;
u64 blocks, block_size;
struct papr_scm_priv *p;
const char *uuid_str;
u64 uuid[2];
int rc;
/* check we have all the required DT properties */
@ -268,8 +274,18 @@ static int papr_scm_probe(struct platform_device *pdev)
return -ENODEV;
}
if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) {
dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn);
if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
return -ENODEV;
}
if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
return -ENODEV;
}
if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
return -ENODEV;
}
@ -282,8 +298,13 @@ static int papr_scm_probe(struct platform_device *pdev)
p->dn = dn;
p->drc_index = drc_index;
p->block_size = unit_cap[0];
p->blocks = unit_cap[1];
p->block_size = block_size;
p->blocks = blocks;
/* We just need to ensure that set cookies are unique across */
uuid_parse(uuid_str, (uuid_t *) uuid);
p->nd_set.cookie1 = uuid[0];
p->nd_set.cookie2 = uuid[1];
/* might be zero */
p->metadata_size = metadata_size;
@ -296,7 +317,7 @@ static int papr_scm_probe(struct platform_device *pdev)
/* setup the resource for the newly bound range */
p->res.start = p->bound_addr;
p->res.end = p->bound_addr + p->blocks * p->block_size;
p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;


@ -24,6 +24,7 @@
#define __IO_PREFIX generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>
#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))


@ -390,6 +390,7 @@
#define MSR_F15H_NB_PERF_CTR 0xc0010241
#define MSR_F15H_PTSC 0xc0010280
#define MSR_F15H_IC_CFG 0xc0011021
#define MSR_F15H_EX_CFG 0xc001102c
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058


@ -11985,6 +11985,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
kunmap(vmx->nested.pi_desc_page);
kvm_release_page_dirty(vmx->nested.pi_desc_page);
vmx->nested.pi_desc_page = NULL;
vmx->nested.pi_desc = NULL;
vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
}
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
if (is_error_page(page))


@ -2426,6 +2426,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
case MSR_AMD64_DC_CFG:
case MSR_F15H_EX_CFG:
break;
case MSR_IA32_UCODE_REV:
@ -2721,6 +2722,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
case MSR_AMD64_DC_CFG:
case MSR_F15H_EX_CFG:
msr_info->data = 0;
break;
case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@ -7446,7 +7448,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
if (!kvm_apic_present(vcpu))
return;
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);


@ -1261,7 +1261,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (ret)
goto cleanup;
} else {
zero_fill_bio(bio);
if (bmd->is_our_pages)
zero_fill_bio(bio);
iov_iter_advance(iter, bio->bi_iter.bi_size);
}


@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
struct page *page;
int order;
for (order = get_order(size); order > 0; order--) {
for (order = get_order(size); order >= 0; order--) {
page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
if (page) {
*nr_zones = min_t(unsigned int, *nr_zones,


@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = {
.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[])
{ "gpll0_sleep_clk_src" },
{ "cxo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},


@ -153,6 +153,11 @@ struct chtls_dev {
unsigned int cdev_state;
};
struct chtls_listen {
struct chtls_dev *cdev;
struct sock *sk;
};
struct chtls_hws {
struct sk_buff_head sk_recv_queue;
u8 txqid;
@ -215,6 +220,8 @@ struct chtls_sock {
u16 resv2;
u32 delack_mode;
u32 delack_seq;
u32 snd_win;
u32 rcv_win;
void *passive_reap_next; /* placeholder for passive */
struct chtls_hws tlshws;


@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
return mtu_idx;
}
static unsigned int select_rcv_wnd(struct chtls_sock *csk)
{
unsigned int rcvwnd;
unsigned int wnd;
struct sock *sk;
sk = csk->sk;
wnd = tcp_full_space(sk);
if (wnd < MIN_RCV_WND)
wnd = MIN_RCV_WND;
rcvwnd = MAX_RCV_WND;
csk_set_flag(csk, CSK_UPDATE_RCV_WND);
return min(wnd, rcvwnd);
}
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
int wscale = 0;
@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
req);
opt0 = TCAM_BYPASS_F |
WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
WND_SCALE_V(RCV_WSCALE(tp)) |
MSS_IDX_V(csk->mtu_idx) |
L2T_IDX_V(csk->l2t_entry->idx) |
NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
static void chtls_set_tcp_window(struct chtls_sock *csk)
{
struct net_device *ndev = csk->egress_dev;
struct port_info *pi = netdev_priv(ndev);
unsigned int linkspeed;
u8 scale;
linkspeed = pi->link_cfg.speed;
scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
csk->rcv_win = CHTLS_10G_RCVWIN;
if (scale)
csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
csk->snd_win = CHTLS_10G_SNDWIN;
if (scale)
csk->snd_win *= scale;
}
static struct sock *chtls_recv_sock(struct sock *lsk,
struct request_sock *oreq,
void *network_hdr,
@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->port_id = port_id;
csk->egress_dev = ndev;
csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
chtls_set_tcp_window(csk);
tp->rcv_wnd = csk->rcv_win;
csk->sndbuf = csk->snd_win;
csk->ulp_mode = ULP_MODE_TLS;
step = cdev->lldi->nrxq / cdev->lldi->nchan;
csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@ -1076,9 +1081,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
cxgb4_port_viid(ndev));
tp->rcv_wnd = select_rcv_wnd(csk);
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
WSCALE_OK(tp),
sock_net(newsk)->
ipv4.sysctl_tcp_window_scaling,
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@ -1130,6 +1135,7 @@ static void chtls_pass_accept_request(struct sock *sk,
struct cpl_t5_pass_accept_rpl *rpl;
struct cpl_pass_accept_req *req;
struct listen_ctx *listen_ctx;
struct vlan_ethhdr *vlan_eh;
struct request_sock *oreq;
struct sk_buff *reply_skb;
struct chtls_sock *csk;
@ -1142,6 +1148,10 @@ static void chtls_pass_accept_request(struct sock *sk,
unsigned int stid;
unsigned int len;
unsigned int tid;
bool th_ecn, ect;
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
u16 eth_hdr_len;
bool ecn_ok;
req = cplhdr(skb) + RSS_HDR;
tid = GET_TID(req);
@ -1180,24 +1190,40 @@ static void chtls_pass_accept_request(struct sock *sk,
oreq->mss = 0;
oreq->ts_recent = 0;
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
network_hdr = (void *)(eh + 1);
} else {
vlan_eh = (struct vlan_ethhdr *)(req + 1);
iph = (struct iphdr *)(vlan_eh + 1);
network_hdr = (void *)(vlan_eh + 1);
}
if (iph->version != 0x4)
goto free_oreq;
network_hdr = (void *)(eh + 1);
tcph = (struct tcphdr *)(iph + 1);
skb_set_network_header(skb, (void *)iph - (void *)req);
tcp_rsk(oreq)->tfo_listener = false;
tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
chtls_set_req_port(oreq, tcph->source, tcph->dest);
inet_rsk(oreq)->ecn_ok = 0;
chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
if (req->tcpopt.wsf <= 14) {
ip_dsfield = ipv4_get_dsfield(iph);
if (req->tcpopt.wsf <= 14 &&
sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
th_ecn = tcph->ece && tcph->cwr;
if (th_ecn) {
ect = !INET_ECN_is_not_ect(ip_dsfield);
ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
inet_rsk(oreq)->ecn_ok = 1;
}
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)


@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
req_wr->lsodisable_to_flags =
htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
TX_URG_V(skb_urgent(skb)) |
T6_TX_FORCE_F | wr_ulp_mode_force |
TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
skb_queue_empty(&csk->txq)));
@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
FW_OFLD_TX_DATA_WR_SHOVE_F);
req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
(sk, CSK_TX_MORE_DATA)) &&
skb_queue_empty(&csk->txq)));
TX_URG_V(skb_urgent(skb)) |
TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
skb_queue_empty(&csk->txq)));
req->plen = htonl(len);
}
@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int mss, flags, err;
int recordsz = 0;
int copied = 0;
int hdrlen = 0;
long timeo;
lock_sock(sk);
@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
recordsz = tls_header_read(&hdr, &msg->msg_iter);
size -= TLS_HEADER_LENGTH;
hdrlen += TLS_HEADER_LENGTH;
copied += TLS_HEADER_LENGTH;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = hdr.type;
if (skb)
@ -1083,10 +1081,8 @@ new_buf:
int off = TCP_OFF(sk);
bool merge;
if (!page)
goto wait_for_memory;
pg_size <<= compound_order(page);
if (page)
pg_size <<= compound_order(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
merge = 1;
@ -1187,7 +1183,7 @@ out:
chtls_tcp_push(sk, flags);
done:
release_sock(sk);
return copied + hdrlen;
return copied;
do_fault:
if (!skb->len) {
__skb_unlink(skb, &csk->txq);


@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
static int listen_notify_handler(struct notifier_block *this,
unsigned long event, void *data)
{
struct chtls_dev *cdev;
struct sock *sk;
int ret;
struct chtls_listen *clisten;
int ret = NOTIFY_DONE;
sk = data;
ret = NOTIFY_DONE;
clisten = (struct chtls_listen *)data;
switch (event) {
case CHTLS_LISTEN_START:
ret = chtls_listen_start(clisten->cdev, clisten->sk);
kfree(clisten);
break;
case CHTLS_LISTEN_STOP:
mutex_lock(&cdev_list_lock);
list_for_each_entry(cdev, &cdev_list, list) {
if (event == CHTLS_LISTEN_START)
ret = chtls_listen_start(cdev, sk);
else
chtls_listen_stop(cdev, sk);
}
mutex_unlock(&cdev_list_lock);
chtls_listen_stop(clisten->cdev, clisten->sk);
kfree(clisten);
break;
}
return ret;
@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
static int chtls_start_listen(struct sock *sk)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
int err;
if (sk->sk_protocol != IPPROTO_TCP)
@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
return -EADDRNOTAVAIL;
sk->sk_backlog_rcv = listen_backlog_rcv;
clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
if (!clisten)
return -ENOMEM;
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
err = raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_START, sk);
CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
return err;
}
static void chtls_stop_listen(struct sock *sk)
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
if (sk->sk_protocol != IPPROTO_TCP)
return;
clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
if (!clisten)
return;
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_STOP, sk);
CHTLS_LISTEN_STOP, clisten);
mutex_unlock(&notify_mutex);
}
@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)
static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
if (sk->sk_state == TCP_LISTEN)
return chtls_start_listen(sk);
return chtls_start_listen(cdev, sk);
return 0;
}
static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
if (sk->sk_state == TCP_LISTEN)
chtls_stop_listen(sk);
chtls_stop_listen(cdev, sk);
}
static void chtls_free_uld(struct chtls_dev *cdev)
{
int i;
tls_unregister_device(&cdev->tlsdev);
kvfree(cdev->kmap.addr);
idr_destroy(&cdev->hwtid_idr);
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
kfree_skb(cdev->rspq_skb_cache[i]);
kfree(cdev->lldi);
kfree_skb(cdev->askb);
kfree(cdev);
}
static inline void chtls_dev_release(struct kref *kref)
{
struct chtls_dev *cdev;
struct tls_device *dev;
dev = container_of(kref, struct tls_device, kref);
cdev = to_chtls_dev(dev);
chtls_free_uld(cdev);
}
static void chtls_register_dev(struct chtls_dev *cdev)
@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
tlsdev->feature = chtls_inline_feature;
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
tls_register_device(&cdev->tlsdev);
tlsdev->release = chtls_dev_release;
kref_init(&tlsdev->kref);
tls_register_device(tlsdev);
cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
static void chtls_unregister_dev(struct chtls_dev *cdev)
{
tls_unregister_device(&cdev->tlsdev);
}
static void process_deferq(struct work_struct *task_param)
{
struct chtls_dev *cdev = container_of(task_param,
@ -262,28 +295,16 @@ out:
return NULL;
}
static void chtls_free_uld(struct chtls_dev *cdev)
{
int i;
chtls_unregister_dev(cdev);
kvfree(cdev->kmap.addr);
idr_destroy(&cdev->hwtid_idr);
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
kfree_skb(cdev->rspq_skb_cache[i]);
kfree(cdev->lldi);
kfree_skb(cdev->askb);
kfree(cdev);
}
static void chtls_free_all_uld(void)
{
struct chtls_dev *cdev, *tmp;
mutex_lock(&cdev_mutex);
list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
chtls_free_uld(cdev);
if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
list_del(&cdev->list);
kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
}
}
mutex_unlock(&cdev_mutex);
}
@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
mutex_lock(&cdev_mutex);
list_del(&cdev->list);
mutex_unlock(&cdev_mutex);
chtls_free_uld(cdev);
kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
break;
default:
break;


@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (type == CGS_UCODE_ID_SMU) {
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
(adev->pdev->revision == 0xe2) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else
} else if ((adev->pdev->device == 0x67ef) &&
(adev->pdev->revision == 0xe2)) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
(adev->pdev->revision == 0xef))) {
((adev->pdev->revision == 0xef) ||
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else
} else if ((adev->pdev->device == 0x67df) &&
((adev->pdev->revision == 0xe1) ||
(adev->pdev->revision == 0xf7))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
if (((adev->pdev->device == 0x6987) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc3))) ||
((adev->pdev->device == 0x6981) &&
((adev->pdev->revision == 0x00) ||
(adev->pdev->revision == 0x01) ||
(adev->pdev->revision == 0x10)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
strcpy(fw_name, "amdgpu/vegam_smc.bin");


@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
mutex_lock(&p->ctx->lock);
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
mutex_lock(&p->ctx->lock);
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,


@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */


@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6864, &vega10_device_info }, /* Vega10 */
{ 0x6867, &vega10_device_info }, /* Vega10 */
{ 0x6868, &vega10_device_info }, /* Vega10 */
{ 0x6869, &vega10_device_info }, /* Vega10 */
{ 0x686A, &vega10_device_info }, /* Vega10 */
{ 0x686B, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
{ 0x686D, &vega10_device_info }, /* Vega10 */
{ 0x686E, &vega10_device_info }, /* Vega10 */
{ 0x686F, &vega10_device_info }, /* Vega10 */
{ 0x687F, &vega10_device_info }, /* Vega10 */
{ 0x66a0, &vega20_device_info }, /* Vega20 */
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
{ 0x66a3, &vega20_device_info }, /* Vega20 */
{ 0x66a4, &vega20_device_info }, /* Vega20 */
{ 0x66a7, &vega20_device_info }, /* Vega20 */
{ 0x66af, &vega20_device_info } /* Vega20 */
};


@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
data->registry_data.fclk_gfxclk_ratio = 0;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;


@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)


@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
/* Apply avfs cks-off voltages to avoid the overshoot
* when switching to the highest sclk frequency
*/
if (data->apply_avfs_cks_off_voltage)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
return 0;
}


@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");


@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
plane->tiled = !!(val & DISPPLANE_TILED);
plane->tiled = val & DISPPLANE_TILED;
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;


@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the


@ -67,6 +67,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@ -1805,6 +1806,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@ -2148,6 +2150,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
{
return i915_ggtt_offset(i915->gt.scratch);
}
#endif


@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
}
}
intel_gt_workarounds_apply(dev_priv);
intel_gt_apply_workarounds(dev_priv);
i915_gem_init_swizzling(dev_priv);
@ -5500,6 +5500,44 @@ err_active:
goto out_ctx;
}
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
obj = i915_gem_object_create_stolen(i915, size);
if (!obj)
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
}
ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
goto err_unref;
i915->gt.scratch = vma;
return 0;
err_unref:
i915_gem_object_put(obj);
return ret;
}
static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
ret = i915_gem_contexts_init(dev_priv);
ret = i915_gem_init_scratch(dev_priv,
IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
ret = i915_gem_contexts_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_scratch;
}
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@ -5624,6 +5669,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
err_scratch:
i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);


@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
len = 6;
len = 3;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
/* And again for good measure (blb/pnv) */
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
}
goto out;


@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
engine->scratch);
i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =


@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
int intel_engine_create_scratch(struct intel_engine_cs *engine,
unsigned int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
WARN_ON(engine->scratch);
obj = i915_gem_object_create_stolen(engine->i915, size);
if (!obj)
obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
}
ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
goto err_unref;
engine->scratch = vma;
return 0;
err_unref:
i915_gem_object_put(obj);
return ret;
}
void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->scratch, 0);
}
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
intel_engine_cleanup_scratch(engine);
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
intel_wa_list_free(&engine->wa_list);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)


@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
*
* Furthermore, Braswell, at least, wants a full mb to be sure that
* the writes are coherent in memory (visible to the GPU) prior to
* execution, and not just visible to other CPUs (as is the result of
* wmb).
*/
wmb();
mb();
return ce->lrc_desc;
}
@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
/* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
*batch++ = i915_ggtt_offset(engine->scratch) + 256;
*batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
*batch++ = i915_ggtt_offset(engine->scratch) + 256;
*batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
i915_ggtt_offset(engine->scratch) +
i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
i915_ggtt_offset(engine->scratch)
i915_scratch_offset(engine->i915)
+ 2 * CACHELINE_BYTES);
}
@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
goto err_cleanup_common;
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
return 0;
intel_engine_init_workarounds(engine);
err_cleanup_common:
intel_engine_cleanup_common(engine);
return ret;
return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)


@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
if (mode & EMIT_FLUSH)
num_store_dw = 4;
cs = intel_ring_begin(rq, 2);
cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
while (num_store_dw--) {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = 0;
}
*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
intel_ring_advance(rq, cs);
return 0;
@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = i915_ggtt_offset(rq->engine->scratch) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = i915_ggtt_offset(rq->engine->scratch) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
unsigned int size;
int err;
intel_engine_setup_common(engine);
@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
size = PAGE_SIZE;
if (HAS_BROKEN_CS_TLB(engine->i915))
size = I830_WA_SIZE;
err = intel_engine_create_scratch(engine, size);
err = intel_engine_init_common(engine);
if (err)
goto err_unpin;
err = intel_engine_init_common(engine);
if (err)
goto err_scratch;
return 0;
err_scratch:
intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = i915_ggtt_offset(engine->scratch);
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = i915_ggtt_offset(engine->scratch);
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;


@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@ -440,7 +441,7 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
struct i915_vma *scratch;
struct i915_wa_list wa_list;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine,
unsigned int size);
void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);


@ -48,6 +48,20 @@
* - Public functions to init or apply the given workaround type.
*/
static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
wal->name = name;
}
static void wa_init_finish(struct i915_wa_list *wal)
{
if (!wal->count)
return;
DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
wal->count, wal->name);
}
static void wa_add(struct drm_i915_private *i915,
i915_reg_t reg, const u32 mask, const u32 val)
{
@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void
wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
const unsigned int grow = 1 << 4;
GEM_BUG_ON(!is_power_of_2(grow));
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
GFP_KERNEL);
if (!list) {
DRM_ERROR("No space for workaround init!\n");
return;
}
if (wal->list)
memcpy(list, wal->list, sizeof(*wa) * wal->count);
wal->list = list;
}
wal->list[wal->count++] = *wa;
}
static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
struct i915_wa wa = {
.reg = reg,
.mask = val,
.val = _MASKED_BIT_ENABLE(val)
};
wal_add(wal, &wa);
}
static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
u32 val)
{
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
_MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
struct i915_wa wa = {
.reg = reg,
.mask = mask,
.val = val
};
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
wal_add(wal, &wa);
}
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, ~0, val);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, val, val);
}
static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(dev_priv))
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
if (!IS_COFFEELAKE(i915))
wa_write_or(wal,
GAM_ECOCHK,
ECOCHK_DIS_TLB);
if (HAS_LLC(dev_priv)) {
if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
I915_WRITE(MMCD_MISC_CTRL,
I915_READ(MMCD_MISC_CTRL) |
MMCD_PCLA |
MMCD_HOTSPOT_EN);
wa_write_or(wal,
MMCD_MISC_CTRL,
MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(dev_priv)) {
u32 val = I915_READ(GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
I915_WRITE(GEN8_L3SQCREG1, val);
}
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
I915_WRITE(GEN8_L3SQCREG4,
I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
wa_write_or(wal,
GAM_ECOCHK,
BDW_DISABLE_HDC_INVALIDATION);
}
static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
gen9_gt_workarounds_apply(dev_priv);
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaEnableGapsTsvCreditFix:skl */
I915_WRITE(GEN8_GARBCNTL,
I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
wa_write_or(wal,
GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
gen9_gt_workarounds_apply(dev_priv);
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisablePooledEuLoadBalancingFix:bxt */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
gen9_gt_workarounds_apply(dev_priv);
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaEnableGapsTsvCreditFix:kbl */
I915_WRITE(GEN8_GARBCNTL,
I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG,
I915_READ(GAMT_CHKN_BIT_REG) |
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
wa_write_or(wal,
GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
struct intel_engine_cs *engine;
unsigned int tmp;
for_each_engine(engine, dev_priv, tmp) {
if (engine->id == RCS)
continue;
I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
}
}
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
gen9_gt_workarounds_apply(dev_priv);
gen9_gt_workarounds_init(i915);
}
static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
gen9_gt_workarounds_apply(dev_priv);
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaEnableGapsTsvCreditFix:cfl */
I915_WRITE(GEN8_GARBCNTL,
I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
wa_write_or(wal,
GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
u32 mcr;
struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
mcr = I915_READ(GEN8_MCR_SELECTOR);
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
mcr &= ~mcr_slice_subslice_mask;
mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
I915_WRITE(GEN8_MCR_SELECTOR, mcr);
wa_write_masked_or(wal,
GEN8_MCR_SELECTOR,
mcr_slice_subslice_mask,
intel_calculate_mcr_s_ss_select(dev_priv));
}
static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
wa_init_mcr(dev_priv);
struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG,
I915_READ(GAMT_CHKN_BIT_REG) |
GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaEnablePreemptionGranularityControlByUMD:cnl */
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
wa_init_mcr(dev_priv);
struct i915_wa_list *wal = &i915->gt_wa_list;
/* This is not a Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaPipelineFlushCoherentLines:icl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES);
/* Wa_1405543622:icl
* Formerly known as WaGAPZPriorityScheme
*/
I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
GEN11_ARBITRATION_PRIO_ORDER_MASK);
/* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
I915_WRITE(GEN8_GARBCNTL,
(I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
GEN11_HASH_CTRL_EXCL_BIT0);
I915_WRITE(GEN11_GLBLINVL,
(I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
GEN11_BANK_HASH_ADDR_EXCL_BIT0);
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
I915_WRITE(GEN11_GACB_PERF_CTRL,
(I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN11_LQSC_CLEAN_EVICT_DISABLE);
wa_write_masked_or(wal,
GEN11_GACB_PERF_CTRL,
GEN11_HASH_CTRL_MASK,
GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
wa_write_or(wal,
GEN11_LSN_UNSLCVC,
GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
wa_write_or(wal,
GEN8_GAMW_ECO_DEV_RW_IA,
GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
MSCUNIT_CLKGATE_DIS);
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
GWUNIT_CLKGATE_DIS);
/* Wa_1604302699:icl */
I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
GEN11_I2M_WRITE_DISABLE);
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE,
GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
I915_READ(INF_UNIT_LEVEL_CLKGATE) |
CGPSF_CLKGATE_DIS);
/* WaForwardProgressSoftReset:icl */
I915_WRITE(GEN10_SCRATCH_LNCF2,
I915_READ(GEN10_SCRATCH_LNCF2) |
PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK);
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
INF_UNIT_LEVEL_CLKGATE,
CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
I915_WRITE(GAMT_CHKN_BIT_REG,
I915_READ(GAMT_CHKN_BIT_REG) |
GAMT_CHKN_DISABLE_L3_COH_PIPE);
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
if (INTEL_GEN(dev_priv) < 8)
struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_start(wal, "GT");
if (INTEL_GEN(i915) < 8)
return;
else if (IS_BROADWELL(dev_priv))
bdw_gt_workarounds_apply(dev_priv);
else if (IS_CHERRYVIEW(dev_priv))
chv_gt_workarounds_apply(dev_priv);
else if (IS_SKYLAKE(dev_priv))
skl_gt_workarounds_apply(dev_priv);
else if (IS_BROXTON(dev_priv))
bxt_gt_workarounds_apply(dev_priv);
else if (IS_KABYLAKE(dev_priv))
kbl_gt_workarounds_apply(dev_priv);
else if (IS_GEMINILAKE(dev_priv))
glk_gt_workarounds_apply(dev_priv);
else if (IS_COFFEELAKE(dev_priv))
cfl_gt_workarounds_apply(dev_priv);
else if (IS_CANNONLAKE(dev_priv))
cnl_gt_workarounds_apply(dev_priv);
else if (IS_ICELAKE(dev_priv))
icl_gt_workarounds_apply(dev_priv);
else if (IS_BROADWELL(i915))
return;
else if (IS_CHERRYVIEW(i915))
return;
else if (IS_SKYLAKE(i915))
skl_gt_workarounds_init(i915);
else if (IS_BROXTON(i915))
bxt_gt_workarounds_init(i915);
else if (IS_KABYLAKE(i915))
kbl_gt_workarounds_init(i915);
else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_init(i915);
else if (IS_COFFEELAKE(i915))
cfl_gt_workarounds_init(i915);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915);
else if (IS_ICELAKE(i915))
icl_gt_workarounds_init(i915);
else
MISSING_CASE(INTEL_GEN(dev_priv));
MISSING_CASE(INTEL_GEN(i915));
wa_init_finish(wal);
}
static enum forcewake_domains
wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(dev_priv,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
return fw;
}
static void
wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
enum forcewake_domains fw;
unsigned long flags;
struct i915_wa *wa;
unsigned int i;
if (!wal->count)
return;
fw = wal_get_fw_for_rmw(dev_priv, wal);
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
intel_uncore_forcewake_get__locked(dev_priv, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 val = I915_READ_FW(wa->reg);
val &= ~wa->mask;
val |= wa->val;
I915_WRITE_FW(wa->reg, val);
}
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
{
wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
}
struct whitelist {
@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
whitelist_apply(engine, whitelist_build(engine, &w));
}
static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *wal = &engine->wa_list;
if (IS_ICELAKE(i915)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
wa_write_or(wal,
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
* Formerly known as WaGAPZPriorityScheme
*/
wa_write_or(wal,
GEN8_GARBCNTL,
GEN11_ARBITRATION_PRIO_ORDER_MASK);
/*
* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
wa_write_masked_or(wal,
GEN8_GARBCNTL,
GEN11_HASH_CTRL_EXCL_MASK,
GEN11_HASH_CTRL_EXCL_BIT0);
wa_write_masked_or(wal,
GEN11_GLBLINVL,
GEN11_BANK_HASH_ADDR_EXCL_MASK,
GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/*
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
wa_write_or(wal,
GEN8_L3SQCREG4,
GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* Wa_1604302699:icl */
wa_write_or(wal,
GEN10_L3_CHICKEN_MODE_REGISTER,
GEN11_I2M_WRITE_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
GEN10_SCRATCH_LNCF2,
PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK);
}
if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
}
if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
wa_write_or(wal,
GEN8_GARBCNTL,
GEN9_GAPS_TSV_CREDIT_DISABLE);
}
if (IS_BROXTON(i915)) {
/* WaDisablePooledEuLoadBalancingFix:bxt */
wa_masked_en(wal,
FF_SLICE_CS_CHICKEN2,
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
if (IS_GEN9(i915)) {
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
wa_masked_en(wal,
GEN9_CSFE_CHICKEN1_RCS,
GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
wa_write_or(wal,
BDW_SCRATCH1,
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(i915))
wa_write_masked_or(wal,
GEN8_L3SQCREG1,
L3_PRIO_CREDITS_MASK,
L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
wa_write_or(wal,
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
}
}
static void xcs_engine_wa_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *wal = &engine->wa_list;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
}
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
return;
wa_init_start(wal, engine->name);
if (engine->id == RCS)
rcs_engine_wa_init(engine);
else
xcs_engine_wa_init(engine);
wa_init_finish(wal);
}
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
wa_list_apply(engine->i915, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_workarounds.c"
#endif
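The conversion above turns immediate mmio writes into recorded entries: each struct i915_wa holds a register, the mask of bits the workaround owns, and the value to install, and wa_list_apply() replays the whole list as read-modify-write cycles under one forcewake grab. A condensed sketch of that core loop, with read_reg()/write_reg() standing in for the I915_READ_FW()/I915_WRITE_FW() accessors (locking and forcewake elided):

/* Condensed sketch of the apply step: for every recorded entry, clear
 * the bits it owns, OR in its value, and write the result back.
 * read_reg()/write_reg() are placeholders for the raw mmio accessors.
 */
static void apply_wa_list(const struct i915_wa_list *wal)
{
	const struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val = read_reg(wa->reg);

		val &= ~wa->mask;	/* drop the bits this entry controls */
		val |= wa->val;		/* install the workaround value */
		write_reg(wa->reg, val);
	}
}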


@ -7,11 +7,35 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
#include <linux/slab.h>
struct i915_wa {
i915_reg_t reg;
u32 mask;
u32 val;
};
struct i915_wa_list {
const char *name;
struct i915_wa *list;
unsigned int count;
};
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
kfree(wal->list);
memset(wal, 0, sizeof(*wal));
}
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
int intel_ctx_workarounds_emit(struct i915_request *rq);
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
void intel_engine_init_workarounds(struct intel_engine_cs *engine);
void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
#endif
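Together with the previous file, this header sketches a complete lifecycle for a workaround list: record entries once at init, replay them on init and after every reset, and free the backing array on teardown. A hedged outline of that call order (declarations and error handling elided; an assumption about the driver-load path of this series, not a literal upstream quote):

/* Outline of the intended call flow; engine/id declarations elided. */
intel_gt_init_workarounds(i915);	/* record GT-wide entries once */
intel_gt_apply_workarounds(i915);	/* replay on init and after reset */

for_each_engine(engine, i915, id) {
	intel_engine_init_workarounds(engine);
	intel_engine_apply_workarounds(engine);
}

/* on teardown */
intel_wa_list_free(&i915->gt_wa_list);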


@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
if (dsi->bridge) {
ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
goto err_encoder_cleanup;
}
} else {
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)


@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
static void
evo_flush(struct nv50_dmac *dmac)
{
/* Push buffer fetches are not coherent with BAR1, we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
if (dmac->push.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
break;
);
}
}
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
/* Push buffer fetches are not coherent with BAR1, we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
if (dmac->push.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
break;
);
}
evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
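Pulling the VRAM flush out of evo_kick() into evo_flush() lets evo_wait() issue the same flush before it writes the 0x20000000 jump token on wrap-around, closing a window where a wrapped push buffer could be fetched before its contents reached VRAM. A sketch of the resulting usage pattern (the method and data words here are made up for illustration):

/* Sketch of the push-buffer pattern around the helpers above: reserve
 * space (which may wrap and flush), emit methods, then kick, which
 * flushes again before updating PUT. Method values are hypothetical.
 */
u32 *push = evo_wait(&dmac, 2);
if (push) {
	*push++ = 0x20010000;	/* hypothetical method header */
	*push++ = 0x00000001;	/* hypothetical data word */
	evo_kick(push, &dmac);	/* evo_flush(), then write PUT */
}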


@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
err = nouveau_drm_device_init(drm);
if (err)
goto err_put;
platform_set_drvdata(pdev, drm);
return drm;
err_put:
drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);


@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
rockchip_drm_platform_remove(pdev);
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
.shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,


@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;


@ -606,6 +606,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
/* Validation memory reservation */
struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/


@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
int ret;
struct {
uint32_t header;
@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
return ret;
}
@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {


@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
drm_global_item_unref(&dev_priv->mem_global_ref);
}
/* struct vmw_validation_mem callback */
static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
{
static struct ttm_operation_ctx ctx = {.interruptible = false,
.no_wait_gpu = false};
struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
}
/* struct vmw_validation_mem callback */
static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
{
struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
* vmw_validation_mem_init_ttm - Interface the validation memory tracker
* to ttm.
* @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
* rather than a struct vmw_validation_mem is to make sure the assumption in
* the callbacks that struct vmw_private derives from struct vmw_validation_mem
* holds true.
* @gran: The recommended allocation granularity
*/
void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
{
struct vmw_validation_mem *vvm = &dev_priv->vvm;
vvm->reserve_mem = vmw_vmt_reserve;
vvm->unreserve_mem = vmw_vmt_unreserve;
vvm->gran = gran;
}


@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
struct page *page;
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
if (ret)
return NULL;
ctx->vm_size_left += ctx->vm->gran;
ctx->total_mem += ctx->vm->gran;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
if (ctx->vm)
ctx->vm_size_left -= PAGE_SIZE;
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
if (ctx->vm && ctx->total_mem) {
ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
ctx->total_mem = 0;
ctx->vm_size_left = 0;
}
}
/**


@ -33,6 +33,21 @@
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>
/**
* struct vmw_validation_mem - Custom interface to provide memory reservations
* for the validation code.
* @reserve_mem: Callback to reserve memory
* @unreserve_mem: Callback to unreserve memory
* @gran: Reservation granularity. Contains a hint of how much memory should
* be reserved in each call to @reserve_mem(). A slow implementation may want
* reservations to be done in large batches.
*/
struct vmw_validation_mem {
int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
size_t gran;
};
/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
* @vm: A pointer to the memory reservation interface or NULL if no
* memory reservation is needed.
* @vm_size_left: Amount of reserved memory that so far has not been allocated.
* @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
struct vmw_validation_mem *vm;
size_t vm_size_left;
size_t total_mem;
};
struct vmw_buffer_object;
@ -101,6 +123,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
return !list_empty(&ctx->bo_list);
}
/**
* vmw_validation_set_val_mem - Register a validation mem object for
* validation memory reservation
* @ctx: The validation context
* @vm: Pointer to a struct vmw_validation_mem
*
* Must be set before the first attempt to allocate validation memory.
*/
static inline void
vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
struct vmw_validation_mem *vm)
{
ctx->vm = vm;
}
/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
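The interface above lets the validation allocator draw its pages against an externally accounted budget: vmw_validation_mem_alloc() calls @reserve_mem() in @gran-sized steps before taking pages, and the accumulated @total_mem is handed back in one @unreserve_mem() call on free. The driver wires these callbacks to ttm_mem_global_alloc()/ttm_mem_global_free(); a hypothetical backend that merely tracks a plain byte budget could look like this:

/* Hypothetical struct vmw_validation_mem backend tracking a byte
 * budget instead of TTM's global accounting; illustration only.
 */
struct budget_vm {
	struct vmw_validation_mem vm;
	size_t used, limit;
};

static int budget_reserve(struct vmw_validation_mem *m, size_t size)
{
	struct budget_vm *b = container_of(m, struct budget_vm, vm);

	if (b->used + size > b->limit)
		return -ENOMEM;
	b->used += size;
	return 0;
}

static void budget_unreserve(struct vmw_validation_mem *m, size_t size)
{
	struct budget_vm *b = container_of(m, struct budget_vm, vm);

	b->used -= size;
}

/* Wiring it up before the first allocation:
 *	b->vm.reserve_mem = budget_reserve;
 *	b->vm.unreserve_mem = budget_unreserve;
 *	b->vm.gran = 16 * PAGE_SIZE;
 *	vmw_validation_set_val_mem(&ctx, &b->vm);
 */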


@ -17,6 +17,9 @@
#ifndef HID_IDS_H_FILE
#define HID_IDS_H_FILE
#define USB_VENDOR_ID_258A 0x258a
#define USB_DEVICE_ID_258A_6A88 0x6a88
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
#define USB_DEVICE_ID_3M2256 0x0502
@ -941,6 +944,10 @@
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
#define USB_VENDOR_ID_RETROUSB 0xf000
#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003
#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c


@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);


@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },


@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
struct net_device *cookie_ndev = cookie;
bool match = false;
if (!rdma_ndev)
return false;
rcu_read_lock();
if (netif_is_bond_master(cookie_ndev) &&
rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))


@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
}
/* allocate space for the counter values */
dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
GFP_KERNEL);
if (!dd->cntrs)
goto bail;


@ -155,6 +155,8 @@ struct hfi1_ib_stats {
extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;
extern int num_driver_cntrs;
/*
* First-cut criterion for "device is active" is
* two thousand dwords combined Tx, Rx traffic per


@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
default:
break;
}
/*
* System latency between send and schedule is large enough that
* forcing call_send to true for piothreshold packets is necessary.
*/
if (wqe->length <= piothreshold)
*call_send = true;
return 0;
}


@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
static const char **dev_cntr_names;
static const char **port_cntr_names;
static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
static int cntr_names_initialized;


@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
err = uverbs_get_flags32(&access, attrs,
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
IB_ACCESS_SUPPORTED);
IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
if (err)
return err;


@ -506,14 +506,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
int npages = 0, current_seq, page_shift, ret, np;
bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 access_mask = ODP_READ_ALLOWED_BIT;
int npages = 0, page_shift, np;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
int current_seq;
size_t size;
int ret;
if (!odp_mr->page_list) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
@ -521,7 +520,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
implicit = true;
} else {
odp = odp_mr;
}
@ -600,7 +599,7 @@ next_mr:
out:
if (ret == -EAGAIN) {
if (mr->parent || !odp->dying) {
if (implicit || !odp->dying) {
unsigned long timeout =
msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);


@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
bool dirty_flag;
*result = true;
if (from_cblock(cmd->cache_blocks) == 0)
/* Nothing to do */
return 0;
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
if (r) {


@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
struct dm_thin_new_mapping;
/*
* The pool runs in 4 modes. Ordered in degraded order for comparisons.
* The pool runs in various modes. Ordered in degraded order for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
@ -282,9 +282,38 @@ struct pool {
mempool_t mapping_pool;
};
static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
static enum pool_mode get_pool_mode(struct pool *pool)
{
return pool->pf.mode;
}
static void notify_of_pool_mode_change(struct pool *pool)
{
const char *descs[] = {
"write",
"out-of-data-space",
"read-only",
"read-only",
"fail"
};
const char *extra_desc = NULL;
enum pool_mode mode = get_pool_mode(pool);
if (mode == PM_OUT_OF_DATA_SPACE) {
if (!pool->pf.error_if_no_space)
extra_desc = " (queue IO)";
else
extra_desc = " (error IO)";
}
dm_table_event(pool->ti->table);
DMINFO("%s: switching pool to %s%s mode",
dm_device_name(pool->pool_md),
descs[(int)mode], extra_desc ? : "");
}
/*
* Target context for a pool.
*/
@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
static void notify_of_pool_mode_change_to_oods(struct pool *pool);
/*
* We're holding onto IO to allow userland time to react. After the
* timeout either the pool will have been resized (and thus back in
@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
pool->pf.error_if_no_space = true;
notify_of_pool_mode_change_to_oods(pool);
notify_of_pool_mode_change(pool);
error_retry_list_with_code(pool, BLK_STS_NOSPC);
}
}
@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
/*----------------------------------------------------------------*/
static enum pool_mode get_pool_mode(struct pool *pool)
{
return pool->pf.mode;
}
static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
{
dm_table_event(pool->ti->table);
DMINFO("%s: switching pool to %s mode",
dm_device_name(pool->pool_md), new_mode);
}
static void notify_of_pool_mode_change_to_oods(struct pool *pool)
{
if (!pool->pf.error_if_no_space)
notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
else
notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
}
static bool passdown_enabled(struct pool_c *pt)
{
return pt->adjusted_pf.discard_passdown;
@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
switch (new_mode) {
case PM_FAIL:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
pool->process_discard = process_bio_success;
@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* alarming rate. Adjust your low water mark if you're
* frequently seeing this mode.
*/
if (old_mode != new_mode)
notify_of_pool_mode_change_to_oods(pool);
pool->out_of_data_space = true;
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
break;
case PM_WRITE:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "write");
if (old_mode == PM_OUT_OF_DATA_SPACE)
cancel_delayed_work_sync(&pool->no_space_timeout);
pool->out_of_data_space = false;
@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* doesn't cause an unexpected mode transition on resume.
*/
pt->adjusted_pf.mode = new_mode;
if (old_mode != new_mode)
notify_of_pool_mode_change(pool);
}
static void abort_transaction(struct pool *pool)
@ -4023,7 +4025,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
.version = {1, 20, 0},
.version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
.version = {1, 20, 0},
.version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
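Since notify_of_pool_mode_change() above indexes descs[] with the enum pool_mode value itself, the table must track the enum's declaration order; PM_OUT_OF_METADATA_SPACE and PM_READ_ONLY intentionally share the "read-only" string. A hedged guard against the two drifting apart, assuming PM_FAIL remains the last enumerator:

/* Sketch: placed next to the descs[] table, this would break the build
 * if enum pool_mode grows without the string table following suit.
 * Assumes PM_FAIL is the final enumerator.
 */
BUILD_BUG_ON(ARRAY_SIZE(descs) != PM_FAIL + 1);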


@ -20,7 +20,6 @@ struct dmz_bioctx {
struct dm_zone *zone;
struct bio *bio;
refcount_t ref;
blk_status_t status;
};
/*
@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
bioctx->status = status;
bio_endio(bio);
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
if (zone) {
if (bio->bi_status != BLK_STS_OK &&
bio_op(bio) == REQ_OP_WRITE &&
dmz_is_seq(zone))
set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
dmz_deactivate_zone(zone);
}
bio_endio(bio);
}
}
/*
* Partial clone read BIO completion callback. This terminates the
* Completion callback for an internally cloned target BIO. This terminates the
* target BIO when there are no more references to its context.
*/
static void dmz_read_bio_end_io(struct bio *bio)
static void dmz_clone_endio(struct bio *clone)
{
struct dmz_bioctx *bioctx = bio->bi_private;
blk_status_t status = bio->bi_status;
struct dmz_bioctx *bioctx = clone->bi_private;
blk_status_t status = clone->bi_status;
bio_put(bio);
bio_put(clone);
dmz_bio_endio(bioctx->bio, status);
}
/*
* Issue a BIO to a zone. The BIO may only partially process the
* Issue a clone of a target BIO. The clone may only partially process the
* original target BIO.
*/
static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio, sector_t chunk_block,
unsigned int nr_blocks)
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio, sector_t chunk_block,
unsigned int nr_blocks)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
sector_t sector;
struct bio *clone;
/* BIO remap sector */
sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
/* If the read is not partial, there is no need to clone the BIO */
if (nr_blocks == dmz_bio_blocks(bio)) {
/* Setup and submit the BIO */
bio->bi_iter.bi_sector = sector;
refcount_inc(&bioctx->ref);
generic_make_request(bio);
return 0;
}
/* Partial BIO: we need to clone the BIO */
clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
/* Setup the clone */
clone->bi_iter.bi_sector = sector;
bio_set_dev(clone, dmz->dev->bdev);
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
clone->bi_end_io = dmz_read_bio_end_io;
clone->bi_end_io = dmz_clone_endio;
clone->bi_private = bioctx;
bio_advance(bio, clone->bi_iter.bi_size);
/* Submit the clone */
refcount_inc(&bioctx->ref);
generic_make_request(clone);
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
return 0;
}
@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks) {
/* Valid blocks found: read them */
nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
if (ret)
return ret;
chunk_block += nr_blocks;
@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
return 0;
}
/*
* Issue a write BIO to a zone.
*/
static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio, sector_t chunk_block,
unsigned int nr_blocks)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
/* Setup and submit the BIO */
bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
refcount_inc(&bioctx->ref);
generic_make_request(bio);
if (dmz_is_seq(zone))
zone->wp_block += nr_blocks;
}
/*
* Write blocks directly in a data zone, at the write pointer.
* If a buffer zone is assigned, invalidate the blocks written
@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
return -EROFS;
/* Submit write */
dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
if (ret)
return ret;
/*
* Validate the blocks in the data zone and invalidate
@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
return -EROFS;
/* Submit write */
dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
if (ret)
return ret;
/*
* Validate the blocks in the buffer zone
@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->zone = NULL;
bioctx->bio = bio;
refcount_set(&bioctx->ref, 1);
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
/*
* Completed target BIO processing.
*/
static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
if (bioctx->status == BLK_STS_OK && *error)
bioctx->status = *error;
if (!refcount_dec_and_test(&bioctx->ref))
return DM_ENDIO_INCOMPLETE;
/* Done */
bio->bi_status = bioctx->status;
if (bioctx->zone) {
struct dm_zone *zone = bioctx->zone;
if (*error && bio_op(bio) == REQ_OP_WRITE) {
if (dmz_is_seq(zone))
set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
}
dmz_deactivate_zone(zone);
}
return DM_ENDIO_DONE;
}
/*
* Get zoned device information.
*/
@ -946,7 +901,6 @@ static struct target_type dmz_type = {
.ctr = dmz_ctr,
.dtr = dmz_dtr,
.map = dmz_map,
.end_io = dmz_end_io,
.io_hints = dmz_io_hints,
.prepare_ioctl = dmz_prepare_ioctl,
.postsuspend = dmz_suspend,
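The dm-zoned rework above replaces the per-BIO status shadow and the end_io hook with plain reference counting on the target BIO: the context starts with one reference, every clone issued by dmz_submit_bio() takes another, the first error is latched straight into bi_status, and the target BIO completes on the final drop. The generic shape of that pattern, sketched with a bare atomic counter and a hypothetical complete_parent() final step:

/* Generic fan-out completion sketch: the parent holds one reference,
 * each in-flight child holds one more, the first error wins, and the
 * last drop completes the parent. complete_parent() is hypothetical.
 */
struct parent_ctx {
	atomic_t ref;		/* initialized to 1 for the parent */
	int status;		/* 0 until the first failure is latched */
};

static void child_done(struct parent_ctx *p, int status)
{
	if (status && !p->status)
		p->status = status;		/* latch the first error */
	if (atomic_dec_and_test(&p->ref))
		complete_parent(p);		/* no children left in flight */
}

static void submit_child(struct parent_ctx *p)
{
	atomic_inc(&p->ref);			/* one reference per child */
	/* ... issue the clone, arranging for child_done() on completion */
}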


@ -1593,6 +1593,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
blk_queue_split(md->queue, &bio);
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {


@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB
This is currently experimental.
config MEDIA_CONTROLLER_REQUEST_API
bool "Enable Media controller Request API (EXPERIMENTAL)"
depends on MEDIA_CONTROLLER && STAGING_MEDIA
default n
---help---
DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
This option enables the Request API for the Media controller and V4L2
interfaces. It is currently needed by a few stateless codec drivers.
There is currently no intention to provide API or ABI stability for
this new API.
#
# Video4Linux support
# Only enables if one of the V4L2 types (ATV, webcam, radio) is selected


@ -947,7 +947,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
}
atomic_dec(&q->owned_by_drv_count);
if (vb->req_obj.req) {
if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
/* This is not supported at the moment */
WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
media_request_object_unbind(&vb->req_obj);
@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
if (vb->state == VB2_BUF_STATE_IN_REQUEST)
if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
vb->state = VB2_BUF_STATE_DEQUEUED;
if (vb->request)
media_request_put(vb->request);
vb->request = NULL;
}
}
static const struct media_request_object_ops vb2_core_req_ops = {
@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
return ret;
vb->state = VB2_BUF_STATE_IN_REQUEST;
/*
* Increment the refcount and store the request.
* The request refcount is decremented again when the
* buffer is dequeued. This is to prevent vb2_buffer_done()
* from freeing the request from interrupt context, which can
* happen if the application closed the request fd after
* queueing the request.
*/
media_request_get(req);
vb->request = req;
/* Fill buffer information for the userspace */
if (pb) {
call_void_bufop(q, copy_timestamp, vb, pb);
@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
if (vb->req_obj.req) {
media_request_object_unbind(&vb->req_obj);
media_request_object_put(&vb->req_obj);
}
call_void_bufop(q, init_buffer, vb);
}
@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
/* go back to dequeued state */
__vb2_dqbuf(vb);
if (WARN_ON(vb->req_obj.req)) {
media_request_object_unbind(&vb->req_obj);
media_request_object_put(&vb->req_obj);
}
if (vb->request)
media_request_put(vb->request);
vb->request = NULL;
dprintk(2, "dqbuf of buffer %d, with state %d\n",
vb->index, vb->state);
@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
vb->prepared = false;
}
__vb2_dqbuf(vb);
if (vb->req_obj.req) {
media_request_object_unbind(&vb->req_obj);
media_request_object_put(&vb->req_obj);
}
if (vb->request)
media_request_put(vb->request);
vb->request = NULL;
}
}
@ -1940,10 +1968,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
if (ret)
return ret;
ret = vb2_start_streaming(q);
if (ret) {
__vb2_queue_cancel(q);
if (ret)
return ret;
}
}
q->streaming = 1;
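The vb2 changes above make a queued buffer pin its media request for its whole lifetime: media_request_get() at qbuf, media_request_put() at dqbuf or queue cancel, so the final request release happens in process context rather than from vb2_buffer_done() in interrupt context. Reduced to its skeleton (the req_obj bind/unbind steps and error paths are elided; a sketch, not the full upstream flow):

/* qbuf path: the buffer takes a reference on the request. */
media_request_get(req);
vb->request = req;

/* dqbuf or queue-cancel path: drop it again, in process context. */
if (vb->request)
	media_request_put(vb->request);
vb->request = NULL;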


@ -333,10 +333,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
struct v4l2_buffer *b,
const char *opname,
struct v4l2_buffer *b, bool is_prepare,
struct media_request **p_req)
{
const char *opname = is_prepare ? "prepare_buf" : "qbuf";
struct media_request *req;
struct vb2_v4l2_buffer *vbuf;
struct vb2_buffer *vb;
@ -378,6 +378,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
return ret;
}
if (is_prepare)
return 0;
if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
if (q->uses_requests) {
dprintk(1, "%s: queue uses requests\n", opname);
@ -631,8 +634,10 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
@ -657,7 +662,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
return -EINVAL;
ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
return -EBUSY;
}
ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
if (ret)
return ret;
ret = vb2_core_qbuf(q, b->index, b, req);


@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
static long media_device_request_alloc(struct media_device *mdev,
int *alloc_fd)
{
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
return -ENOTTY;
return media_request_alloc(mdev, alloc_fd);
#else
return -ENOTTY;
#endif
}
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)


@ -997,11 +997,18 @@ static int vicodec_start_streaming(struct vb2_queue *q,
q_data->sequence = 0;
if (!V4L2_TYPE_IS_OUTPUT(q->type))
if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
if (!ctx->is_enc) {
state->width = q_data->width;
state->height = q_data->height;
}
return 0;
}
state->width = q_data->width;
state->height = q_data->height;
if (ctx->is_enc) {
state->width = q_data->width;
state->height = q_data->height;
}
state->ref_frame.width = state->ref_frame.height = 0;
state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div,
GFP_KERNEL);


@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
list_del(&buf->list);
v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}


@ -204,8 +204,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
list_del(&buf->list);
v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}


@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
list_del(&buf->list);
v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}


@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
list_del(&buf->list);
v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}

Some files were not shown because too many files have changed in this diff.