Linux 5.8

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl8nLmkeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGBEkH/RJAnEan4gcdkBDf
 2xS0yk4XLMjKZwbz61VSeKMUBkGCsh1cWsaAtJJAIVYj/6o/7mld01sZCnOASLnJ
 ET0nXL2NiT/f+prYkTE5qeYH225/Yfh5jgmrqZtx/uXFCwgE5Nzi3f72IXQDmCR+
 kmpNhNox3YqQTKXhv7DXobDKcO0n8nZavnhxmA9SBZn2h9RHvmvJghD0UOfLjMpA
 1SbknaE67n5JN/JjI6TkYWk4nuJmqfvmBL5IYVDEZYO4UlM5Bqzhw0XN7Ax70K3M
 KRK/eiqRmNwun5MxWnbzQU7t7iTgVmzjHLTpWGcM3V4blgGXC3uhjc+p/R8KTQUE
 bIydSzs=
 =fDeo
 -----END PGP SIGNATURE-----

Merge tag 'v5.8' into drm-next

I need to backmerge 5.8 as I've got a bunch of fixes sitting
on an rc7 base that I want to land.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Dave Airlie 2020-08-11 11:58:31 +10:00
commit c44264f9f7
463 changed files with 4869 additions and 3868 deletions


@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
Mitesh shah <mshah@teja.com>


@ -16,7 +16,16 @@ Description: Allow the root user to disable/enable in runtime the clock
gating mechanism in Gaudi. Due to how Gaudi is built, the
clock gating needs to be disabled in order to access the
registers of the TPC and MME engines. This is sometimes needed
during debug by the user and hence the user needs this option
during debug by the user and hence the user needs this option.
The user can supply a bitmask value in which each bit represents
a different engine whose clock gating is to be disabled/enabled.
The bitmask is composed of 20 bits:
0 - 7 : DMA channels
8 - 11 : MME engines
12 - 19 : TPC engines
The bit position of a specific engine can be determined
using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values
are defined in the uapi habanalabs.h file, in enum gaudi_engine_id.
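
As a rough illustration of composing the bitmask described above (a
hedged sketch, not part of this diff: the GAUDI_ENGINE_ID_* values come
from the uapi header the entry references, while the hl0 debugfs path
and hex output format are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <misc/habanalabs.h>	/* uapi header: enum gaudi_engine_id */

int main(void)
{
	uint32_t mask = 0;

	/* Disable clock gating for DMA channel 0 (bit 0) and TPC engine 3. */
	mask |= 1U << GAUDI_ENGINE_ID_DMA_0;
	mask |= 1U << GAUDI_ENGINE_ID_TPC_3;

	/* Echo the result into /sys/kernel/debug/habanalabs/hl0/clock_gating
	 * (path assumed from the hl<n> pattern in this file). */
	printf("0x%x\n", mask);
	return 0;
}
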
What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
Date: Jan 2019


@ -47,6 +47,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: Phandle to the device SRAM
iommus:
maxItems: 1
memory-region:
description:
CMA pool to use for buffers allocation instead of the default


@ -378,6 +378,8 @@ examples:
- |
sound {
compatible = "simple-audio-card";
#address-cells = <1>;
#size-cells = <0>;
simple-audio-card,name = "rsnd-ak4643";
simple-audio-card,format = "left_j";
@ -391,10 +393,12 @@ examples:
"ak4642 Playback", "DAI1 Playback";
dpcmcpu: simple-audio-card,cpu@0 {
reg = <0>;
sound-dai = <&rcar_sound 0>;
};
simple-audio-card,cpu@1 {
reg = <1>;
sound-dai = <&rcar_sound 1>;
};
@ -418,6 +422,8 @@ examples:
- |
sound {
compatible = "simple-audio-card";
#address-cells = <1>;
#size-cells = <0>;
simple-audio-card,routing =
"pcm3168a Playback", "DAI1 Playback",
@ -426,6 +432,7 @@ examples:
"pcm3168a Playback", "DAI4 Playback";
simple-audio-card,dai-link@0 {
reg = <0>;
format = "left_j";
bitclock-master = <&sndcpu0>;
frame-master = <&sndcpu0>;
@ -439,22 +446,23 @@ examples:
};
simple-audio-card,dai-link@1 {
reg = <1>;
format = "i2s";
bitclock-master = <&sndcpu1>;
frame-master = <&sndcpu1>;
convert-channels = <8>; /* TDM Split */
sndcpu1: cpu@0 {
sndcpu1: cpu0 {
sound-dai = <&rcar_sound 1>;
};
cpu@1 {
cpu1 {
sound-dai = <&rcar_sound 2>;
};
cpu@2 {
cpu2 {
sound-dai = <&rcar_sound 3>;
};
cpu@3 {
cpu3 {
sound-dai = <&rcar_sound 4>;
};
codec {
@ -466,6 +474,7 @@ examples:
};
simple-audio-card,dai-link@2 {
reg = <2>;
format = "i2s";
bitclock-master = <&sndcpu2>;
frame-master = <&sndcpu2>;


@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux
+ Ancillary clock features
- Time stamp external events
- Period output signals configurable from user space
- Low Pass Filter (LPF) access from user space
- Synchronization of the Linux system time via the PPS subsystem
PTP hardware clock kernel API
@ -94,3 +95,14 @@ Supported hardware
- Auxiliary Slave/Master Mode Snapshot (optional interrupt)
- Target Time (optional interrupt)
* Renesas (IDT) ClockMatrix™
- Up to 4 independent PHC channels
- Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2)
- Programmable output periodic signals
- Programmable inputs can time stamp external triggers
- Driver and/or hardware configuration through firmware (idtcm.bin)
- LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2))
- Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs)
- Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional)
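
As a hedged sketch of how the LPF/.adjPhase feature above is reached
from user space (not part of this diff): phase adjustments go through
clock_adjtime() with ADJ_OFFSET on the PHC's dynamic posix clock. The
FD_TO_CLOCKID convention mirrors the kernel's testptp.c selftest;
/dev/ptp0 and the 1 us value are assumptions.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timex tx = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	tx.modes = ADJ_OFFSET | ADJ_NANO;	/* phase offset, in ns */
	tx.offset = 1000;			/* request a +1 us shift */

	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("clock_adjtime");

	close(fd);
	return 0;
}
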


@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to
leverage the UDP based load balancing capability of different networks.
MPLSoUDP (https://tools.ietf.org/html/rfc7510) is one among them.
The Bareudp tunnel module provides a generic L3 encapsulation tunnelling
support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside
a UDP tunnel.
The Bareudp tunnel module provides a generic L3 encapsulation support for
tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.
Special Handling
----------------
@ -26,7 +25,7 @@ Usage
1) Device creation & deletion
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847.
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
This creates a bareudp tunnel device which tunnels L3 traffic with ethertype
0x8847 (MPLS traffic). The destination port of the UDP header will be set to
@ -34,14 +33,21 @@ Usage
b) ip link delete bareudp0
2) Device creation with multiple proto mode enabled
2) Device creation with multiproto mode enabled
There are two ways to create a bareudp device for MPLS & IP with multiproto mode
enabled.
The multiproto mode allows bareudp tunnels to handle several protocols of the
same family. It is currently only available for IP and MPLS. This mode has to
be enabled explicitly with the "multiproto" flag.
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto
b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
For an IPv4 tunnel the multiproto mode allows the tunnel to also handle
IPv6.
b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto
For MPLS, the multiproto mode allows the tunnel to handle both unicast
and multicast MPLS packets.
3) Device Usage


@ -486,6 +486,10 @@ narrow. The description of these groups must be added to the following table:
- Contains packet traps for packets that should be locally delivered after
routing, but do not match more specific packet traps (e.g.,
``ipv4_bgp``)
* - ``external_delivery``
- Contains packet traps for packets that should be routed through an
external interface (e.g., management interface) that does not belong to
the same device (e.g., switch ASIC) as the ingress interface
* - ``ipv6``
- Contains packet traps for various IPv6 control packets (e.g., Router
Advertisements)


@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
F: include/linux/mfd/altera-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Thor Thayer <thor.thayer@linux.intel.com>
M: Joyce Ooi <joyce.ooi@intel.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/altera/
@ -1425,7 +1425,7 @@ F: arch/arm*/include/asm/perf_event.h
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
F: arch/arm/oprofile/common.c
F: drivers/perf/*
F: drivers/perf/
F: include/linux/perf/arm_pmu.h
ARM PORT
@ -6961,6 +6961,7 @@ M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <festevam@gmail.com>
R: Shengjiu Wang <shengjiu.wang@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@ -9310,6 +9311,17 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
KCOV
R: Dmitry Vyukov <dvyukov@google.com>
R: Andrey Konovalov <andreyknvl@google.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kcov.rst
F: include/linux/kcov.h
F: include/uapi/linux/kcov.h
F: kernel/kcov.c
F: scripts/Makefile.kcov
KCSAN
M: Marco Elver <elver@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
@ -11245,7 +11257,7 @@ S: Maintained
F: drivers/crypto/atmel-ecc.*
MICROCHIP I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91-*.c
@ -11338,17 +11350,17 @@ F: drivers/iio/adc/at91-sama5d2_adc.c
F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h
MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Claudiu Beznea <claudiu.beznea@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c
MICROCHIP SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Tudor Ambarus <tudor.ambarus@microchip.com>
S: Supported
F: drivers/spi/spi-atmel.*
MICROCHIP SSC DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel-ssc.c
@ -14181,7 +14193,8 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc@codeaurora.org>
M: Akash Asthana <akashast@codeaurora.org>
M: Mukesh Savaliya <msavaliy@codeaurora.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported
@ -14867,6 +14880,7 @@ F: drivers/s390/block/dasd*
F: include/linux/dasd_mod.h
S390 IOMMU (PCI)
M: Matthew Rosato <mjrosato@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION =
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@ -567,7 +567,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
@ -1754,7 +1754,7 @@ PHONY += descend $(build-dirs)
descend: $(build-dirs)
$(build-dirs): prepare
$(Q)$(MAKE) $(build)=$@ \
single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \
single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \
need-builtin=1 need-modorder=1
clean-dirs := $(addprefix _clean_, $(clean-dirs))


@ -342,7 +342,8 @@
comphy: phy@18300 {
compatible = "marvell,armada-380-comphy";
reg = <0x18300 0x100>;
reg-names = "comphy", "conf";
reg = <0x18300 0x100>, <0x18460 4>;
#address-cells = <1>;
#size-cells = <0>;


@ -397,7 +397,7 @@
pinctrl_usbotg: usbotggrp {
fsl,pins = <
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
>;
};
@ -409,6 +409,7 @@
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
>;
};


@ -99,7 +99,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";


@ -213,7 +213,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
status = "okay";
};


@ -402,7 +402,7 @@
&gbe0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
phy-mode = "rgmii-rxid";
status = "okay";
};


@ -198,7 +198,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};


@ -117,7 +117,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};


@ -181,7 +181,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};


@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
#include <asm/thread_info.h>
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7


@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
struct arch_hw_breakpoint *info)
{
return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
/*
* If we triggered a user watchpoint from a uaccess routine,
* then handle the stepping ourselves since userspace really
* can't help us with this.
*/
if (watchpoint_fault_on_uaccess(regs, info))
goto step;
perf_bp_event(wp, regs);
/*
* If no overflow handler is present, insert a temporary
* mismatch breakpoint so we can single-step over the
* watchpoint trigger.
* Defer stepping to the overflow handler if one is installed.
* Otherwise, insert a temporary mismatch breakpoint so that
* we can single-step over the watchpoint trigger.
*/
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
if (!is_default_overflow_handler(wp))
goto unlock;
step:
enable_single_step(wp, instruction_pointer(regs));
unlock:
rcu_read_unlock();
}


@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
if (!cntvct_ok) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
}
}


@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
pud_t *pud;
p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
if (!WARN_ON(!p4d))
if (WARN_ON(!p4d))
return;
pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))


@ -137,7 +137,7 @@ export TEXT_OFFSET
core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
boot := arch/arm64/boot


@ -161,6 +161,7 @@
resets = <&ccu RST_BUS_VE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
allwinner,sram = <&ve_sram 1>;
iommus = <&iommu 3>;
};
gpu: gpu@1800000 {


@ -454,10 +454,7 @@
status = "okay";
phy-mode = "2500base-x";
phys = <&cp1_comphy5 2>;
fixed-link {
speed = <2500>;
full-duplex;
};
managed = "in-band-status";
};
&cp1_spi1 {


@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".org . - (662b-661b) + (664b-663b)\n\t" \
".previous\n" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \


@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__uint128_t tmp;
u64 sum;
int n = ihl; /* we want it signed */
tmp = *(const __uint128_t *)iph;
iph += 16;
ihl -= 4;
n -= 4;
tmp += ((tmp >> 64) | (tmp << 64));
sum = tmp >> 64;
do {
sum += *(const u32 *)iph;
iph += 4;
} while (--ihl);
} while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force u32)(sum >> 32));
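
A standalone illustration (not from the patch) of why the counter
becomes signed here: with a malformed header length such as ihl == 4
(the legal minimum is 5), the old unsigned countdown wraps to UINT_MAX
after the first pass, while the signed "--n > 0" form stops immediately.

#include <stdio.h>

static unsigned long buggy_passes(unsigned int ihl)
{
	unsigned long count = 0;

	ihl -= 4;			/* 4 -> 0 */
	do {
		count++;
	} while (--ihl && count < 10);	/* 0 wraps to UINT_MAX; capped for the demo */

	return count;
}

static unsigned long fixed_passes(unsigned int ihl)
{
	unsigned long count = 0;
	int n = ihl;			/* we want it signed */

	n -= 4;
	do {
		count++;
	} while (--n > 0);		/* -1 > 0 is false: one pass and out */

	return count;
}

int main(void)
{
	printf("ihl=4: old-style loop runs %lu+ passes, fixed loop runs %lu\n",
	       buggy_passes(4), fixed_passes(4));
	return 0;
}
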


@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
system_supports_generic_auth()) && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
(vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu) false
#endif
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)


@ -30,7 +30,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
#include <asm/pointer_auth.h>
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);


@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
ifneq ($(COMPAT_GCC_TOOLCHAIN),)
CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)


@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
return true;
}
static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
{
pud_t *pudp;
pmd_t *pmdp;
@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return false;
if (pudp)
return kvm_s2pud_exec(pudp);
return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
else if (pmdp)
return kvm_s2pmd_exec(pmdp);
return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
else
return kvm_s2pte_exec(ptep);
return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
(fault_status == FSC_PERM &&
stage2_is_exec(kvm, fault_ipa, vma_pagesize));
if (vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);


@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
#define atomic64_set_release(v, i) atomic64_set((v), (i))
static __inline__ s64
atomic64_read(const atomic64_t *v)
{


@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
}
__cmpxchg_called_with_bad_pointer();
return old;


@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
{
unsigned long flags;
u8 prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}


@ -3072,10 +3072,18 @@ do_hash_page:
ori r0,r0,DSISR_BAD_FAULT_64S@l
and. r0,r5,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
/*
* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
* don't call hash_page, just fail the fault. This is required to
* prevent re-entrancy problems in the hash code, namely perf
* interrupts hitting while something holds H_PAGE_BUSY, and taking a
* hash fault. See the comment in hash_preload().
*/
ld r11, PACA_THREAD_INFO(r13)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
lwz r0,TI_PREEMPT(r11)
andis. r0,r0,NMI_MASK@h
bne 77f
/*
* r3 contains the trap number


@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
pgd_t *pgdir;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
unsigned long flags;
BUG_ON(get_region_id(ea) != USER_REGION_ID);
@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
return;
#endif /* CONFIG_PPC_64K_PAGES */
/*
* __hash_page_* must run with interrupts off, as it sets the
* H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
* time and may take a hash fault reading the user stack, see
* read_user_stack_slow() in the powerpc/perf code.
*
* If that takes a hash fault on the same page as we lock here, it
* will bail out when seeing H_PAGE_BUSY set, and retry the access
* leading to an infinite loop.
*
* Disabling interrupts here does not prevent perf interrupts, but it
* will prevent them taking hash faults (see the NMI test in
* do_hash_page), then read_user_stack's copy_from_user_nofault will
* fail and perf will fall back to read_user_stack_slow(), which
* walks the Linux page tables.
*
* Interrupts must also be off for the duration of the
* mm_is_thread_local test and update, to prevent preempt running the
* mm on another CPU (XXX: this may be racy vs kthread_use_mm).
*/
local_irq_save(flags);
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
local_irq_restore(flags);
}
/*


@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
/*
* If perf interrupts hit in a local_irq_disable (soft-masked) region,
* we consider them as NMIs. This is required to prevent hash faults on
* user addresses when reading callchains. See the NMI test in
* do_hash_page.
*/
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();


@ -95,19 +95,40 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
phys_addr_t start;
unsigned long size;
if (initrd_start >= initrd_end) {
pr_info("initrd not found or empty");
goto disable;
}
if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
pr_err("initrd extends beyond end of memory");
/* Ignore the virtual address computed during device tree parsing */
initrd_start = initrd_end = 0;
if (!phys_initrd_size)
return;
/*
* Round the memory region to page boundaries as per free_initrd_mem()
* This allows us to detect whether the pages overlapping the initrd
* are in use, but more importantly, reserves the entire set of pages
* as we don't want these pages allocated for other purposes.
*/
start = round_down(phys_initrd_start, PAGE_SIZE);
size = phys_initrd_size + (phys_initrd_start - start);
size = round_up(size, PAGE_SIZE);
if (!memblock_is_region_memory(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
(u64)start, size);
goto disable;
}
size = initrd_end - initrd_start;
memblock_reserve(__pa_symbol(initrd_start), size);
if (memblock_is_region_reserved(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
(u64)start, size);
goto disable;
}
memblock_reserve(start, size);
/* Now convert initrd to virtual addresses */
initrd_start = (unsigned long)__va(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@ -126,33 +147,36 @@ void __init setup_bootmem(void)
{
struct memblock_region *reg;
phys_addr_t mem_size = 0;
phys_addr_t total_mem = 0;
phys_addr_t mem_start, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
/* Find the memory region containing the kernel */
for_each_memblock(memory, reg) {
phys_addr_t end = reg->base + reg->size;
if (reg->base <= vmlinux_start && vmlinux_end <= end) {
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*
* Remove memblock from the end of usable area to the
* end of region
*/
if (reg->base + mem_size < end)
memblock_remove(reg->base + mem_size,
end - reg->base - mem_size);
}
end = reg->base + reg->size;
if (!total_mem)
mem_start = reg->base;
if (reg->base <= vmlinux_start && vmlinux_end <= end)
BUG_ON(reg->size == 0);
total_mem = total_mem + reg->size;
}
BUG_ON(mem_size == 0);
/*
* Remove memblock from the end of usable area to the
* end of region
*/
mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
if (mem_start + mem_size < end)
memblock_remove(mem_start + mem_size,
end - mem_start - mem_size);
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
set_max_mapnr(PFN_DOWN(mem_size));
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = max_pfn;
set_max_mapnr(max_low_pfn);
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();


@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
__pgprot(_PAGE_TABLE)));
flush_tlb_all();
local_flush_tlb_all();
}
static void __init populate(void *start, void *end)
@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
__pgprot(_PAGE_TABLE)));
flush_tlb_all();
local_flush_tlb_all();
memset(start, 0, end - start);
}


@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,


@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@ -33,13 +34,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
#if CONFIG_PGTABLE_LEVELS > 2
#define __pmd_free_tlb(tlb, pmdp, addr) \
do { \
struct page *page = virt_to_page(pmdp); \
pgtable_pmd_page_dtor(page); \
tlb_remove_page((tlb), page); \
} while (0);
#endif
#endif /* __ASM_SH_PGALLOC_H */


@ -199,7 +199,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
mov.l 2f, r10 ! Number of syscalls
mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0
@ -353,7 +353,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
@ -392,7 +392,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
2: .long NR_syscalls
6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave


@ -39,6 +39,7 @@
#define BT_MBI_UNIT_PMC 0x04
#define BT_MBI_UNIT_GFX 0x06
#define BT_MBI_UNIT_SMI 0x0C
#define BT_MBI_UNIT_CCK 0x14
#define BT_MBI_UNIT_USB 0x43
#define BT_MBI_UNIT_SATA 0xA3
#define BT_MBI_UNIT_PCIE 0xA6


@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable,
printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
unsigned int nbytes)
{
if (!user_mode(regs))
return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
*/
if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
return -EINVAL;
return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}
/*
* There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
*
@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
bool bad_ip;
/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
*/
bad_ip = user_mode(regs) &&
__chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
OPCODE_BUFSIZE)) {
if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
printk("%sCode: Bad RIP value.\n", loglvl);
} else {
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"


@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
copy_part(offsetof(struct fxregs_state, st_space), 128,
&xsave->i387.st_space, &kbuf, &offset_start, &count);
if (header.xfeatures & XFEATURE_MASK_SSE)
copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
copy_part(xstate_offsets[XFEATURE_SSE], 256,
&xsave->i387.xmm_space, &kbuf, &offset_start, &count);
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:


@ -207,7 +207,7 @@ spurious_8259A_irq:
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
printk(KERN_DEBUG
printk_deferred(KERN_DEBUG
"spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}


@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
* or a page fault), which can make frame pointers
* unreliable.
*/
if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
}
@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (unwind_error(&state))
return -EINVAL;
/* Success path for non-user tasks, i.e. kthreads and idle tasks */
if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
return 0;
}


@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
/*
* Find the orc_entry associated with the text address.
*
* Decrement call return addresses by one so they work for sibling
* calls and calls to noreturn functions.
* For a call frame (as opposed to a signal frame), state->ip points to
* the instruction after the call. That instruction's stack layout
* could be different from the call instruction's layout, for example
* if the call was to a noreturn function. So get the ORC data for the
* call instruction itself.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
if (!orc) {
@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
state->sp = task->thread.sp;
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
state->signal = (void *)state->ip == ret_from_fork;
}
if (get_stack_info((unsigned long *)state->sp, state->task,


@ -358,6 +358,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
. = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);


@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;


@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
if (pause_filter_count) {
if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
control->pause_filter_count = pause_filter_count;
if (pause_filter_thresh)
control->pause_filter_thresh = pause_filter_thresh;
@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel = (svm_get_cpl(vcpu) == 0);
if (pause_filter_thresh)
if (!kvm_pause_in_guest(vcpu->kvm))
grow_ple_window(vcpu);
kvm_vcpu_on_spin(vcpu, in_kernel);
@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
if (pause_filter_thresh)
if (!kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
}
@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
static int svm_vm_init(struct kvm *kvm)
{
if (!pause_filter_count || !pause_filter_thresh)
kvm->arch.pause_in_guest = true;
if (avic) {
int ret = avic_vm_init(kvm);
if (ret)


@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
return -EINVAL;
/*
* SMM temporarily disables VMX, so we cannot be in guest mode,
* nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
return ret;
/* Empty 'VMXON' state is permitted */
if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
return 0;
/* Empty 'VMXON' state is permitted if no VMCS loaded */
if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
/* See vmx_has_valid_vmcs12. */
if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
(kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
(kvm_state->hdr.vmx.vmcs12_pa != -1ull))
return -EINVAL;
else
return 0;
}
if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||


@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
/*
* Note: the same condition is checked against the state provided by userspace
* in vmx_set_nested_state; if it is satisfied, the nested state must include
* the VMCS12.
*/
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);


@ -57,7 +57,7 @@ static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(dst, len))
if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)


@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
mmap_read_unlock(mm);
mmput(mm);
mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);


@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
if (!dev_data->persist) return 0;
if (!dev_data->persist) {
atm_dev_put(dev);
return 0;
}
dev_data->persist = 0;
if (PRIV(dev)->vcc) return 0;
if (PRIV(dev)->vcc) {
atm_dev_put(dev);
return 0;
}
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);


@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
return next;
/* When no more children in primary, continue with secondary */
if (!IS_ERR_OR_NULL(fwnode->secondary))
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
next = fwnode_get_next_child_node(fwnode->secondary, child);
return next;


@ -2865,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
return error;
}
/*
* Ignore timers tagged with no-reset and no-idle. These are likely in use,
* for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
* are needed, we could also look at the timer register configuration.
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
return -EBUSY;
return 0;
}
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@ -2921,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
return error;
error = sysc_check_active_timer(ddata);
if (error)
return error;
error = sysc_get_clocks(ddata);
if (error)
return error;


@ -814,7 +814,8 @@ static struct inode *devmem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
struct inode *inode = READ_ONCE(devmem_inode);
/* pairs with smp_store_release() in devmem_init_inode() */
struct inode *inode = smp_load_acquire(&devmem_inode);
/*
* Check that the initialization has completed. Losing the race
@ -1028,8 +1029,11 @@ static int devmem_init_inode(void)
return rc;
}
/* publish /dev/mem initialized */
WRITE_ONCE(devmem_inode, inode);
/*
* Publish /dev/mem initialized.
* Pairs with smp_load_acquire() in revoke_devmem().
*/
smp_store_release(&devmem_inode, inode);
return 0;
}
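
A generic illustration of the publish/consume pattern the comments
above describe — a hedged analogue in C11 atomics rather than the
kernel's smp_store_release()/smp_load_acquire(); the struct and
function names are invented for the sketch:

#include <stdatomic.h>
#include <stdlib.h>

struct inode_stub { int initialized; };

static _Atomic(struct inode_stub *) published;

static void publisher(void)
{
	struct inode_stub *inode = malloc(sizeof(*inode));

	if (!inode)
		return;
	inode->initialized = 1;				/* initialize first... */
	atomic_store_explicit(&published, inode,
			      memory_order_release);	/* ...then publish */
}

static int consumer(void)
{
	struct inode_stub *inode =
		atomic_load_explicit(&published, memory_order_acquire);

	/* An acquire load that observes the pointer also observes the
	 * initialization done before the release store. */
	return inode ? inode->initialized : 0;
}
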


@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&


@ -19,7 +19,7 @@
/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
#define DMTIMER_RESET_WAIT 100000
@ -44,6 +44,8 @@ struct dmtimer_systimer {
u8 ctrl;
u8 wakeup;
u8 ifctrl;
struct clk *fck;
struct clk *ick;
unsigned long rate;
};
@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void)
}
/* Interface clocks are only available on some SoC variants */
static int __init dmtimer_systimer_init_clock(struct device_node *np,
static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
struct device_node *np,
const char *name,
unsigned long *rate)
{
struct clk *clock;
unsigned long r;
bool is_ick = false;
int error;
is_ick = !strncmp(name, "ick", 3);
clock = of_clk_get_by_name(np, name);
if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
if ((PTR_ERR(clock) == -EINVAL) && is_ick)
return 0;
else if (IS_ERR(clock))
return PTR_ERR(clock);
@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np,
if (!r)
return -ENODEV;
if (is_ick)
t->ick = clock;
else
t->fck = clock;
*rate = r;
return 0;
@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
{
writel_relaxed(0, t->base + t->sysc);
if (!dmtimer_systimer_revision1(t))
return;
writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
}
static int __init dmtimer_systimer_setup(struct device_node *np,
@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
pr_err("%s: clock source init failed: %i\n", __func__, error);
/* For ti-sysc, we have timer clocks at the parent module level */
error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
if (error)
goto err_unmap;
t->rate = rate;
error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
if (error)
goto err_unmap;
@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt)
struct dmtimer_systimer *t = &clkevt->t;
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}
static void omap_clockevent_unidle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
int error;
error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
3, /* Timer internal resynch latency */
0xffffffff);
if (of_device_is_compatible(np, "ti,am33xx") ||
of_device_is_compatible(np, "ti,am43")) {
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
dev->suspend = omap_clockevent_idle;
dev->resume = omap_clockevent_unidle;
}
@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs)
clksrc->loadval = readl_relaxed(t->base + t->counter);
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}
static void dmtimer_clocksource_resume(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
int error;
error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(clksrc->loadval, t->base + t->counter);
@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np)
dev->mask = CLOCKSOURCE_MASK(32);
dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
if (of_device_is_compatible(np, "ti,am33xx") ||
of_device_is_compatible(np, "ti,am43")) {
/* Unlike for clockevent, legacy code sets suspend only for am4 */
if (of_machine_is_compatible("ti,am43")) {
dev->suspend = dmtimer_clocksource_suspend;
dev->resume = dmtimer_clocksource_resume;
}


@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
case PF_INET:
if (likely(!inet_sk(sk)->inet_rcv_saddr))
return ndev;
ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:


@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
&record_type);
if (err)
goto out_err;
/* Avoid appending tls handshake, alert to tls data */
if (skb)
tx_skb_finalize(skb);
}
recordsz = size;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = record_type;
if (skb)
ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||


@ -356,10 +356,7 @@ static struct pstore_info efi_pstore_info = {
static __init int efivars_pstore_init(void)
{
if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
return 0;
if (!efivars_kobject())
if (!efivars_kobject() || !efivar_supports_writes())
return 0;
if (efivars_pstore_disable)


@ -176,11 +176,13 @@ static struct efivar_operations generic_ops;
static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
}
return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
@ -382,7 +384,8 @@ static int __init efisubsys_init(void)
return -ENOMEM;
}
if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
efivar_ssdt_load();
error = generic_ops_register();
if (error)
@ -416,7 +419,8 @@ static int __init efisubsys_init(void)
err_remove_group:
sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);


@ -680,11 +680,8 @@ int efivars_sysfs_init(void)
struct kobject *parent_kobj = efivars_kobject();
int error = 0;
if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
return -ENODEV;
/* No efivars has been registered yet */
if (!parent_kobj)
if (!parent_kobj || !efivar_supports_writes())
return 0;
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,


@ -6,8 +6,7 @@
# enabled, even if doing so doesn't break the build.
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small \
$(call cc-option,-maccumulate-outgoing-args)
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \


@ -44,7 +44,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
*addr = ALIGN((unsigned long)alloc_addr, align);
if (slack > 0) {
int l = (alloc_addr % align) / EFI_PAGE_SIZE;
int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE;
if (l) {
efi_bs_call(free_pages, alloc_addr, slack - l + 1);
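
The one-line change above swaps a 64-bit "%" for a mask. A short
illustration (not from the patch) of the identity it relies on: for a
power-of-two align, x % align == x & (align - 1), and the masked form
avoids the 64-bit modulo that 32-bit builds would lower to a libgcc
helper such as __umoddi3, which the EFI stub cannot link against.

#include <assert.h>
#include <stdint.h>

static uint64_t mod_pow2(uint64_t x, uint64_t align)
{
	/* Only valid when align is a nonzero power of two. */
	assert(align && (align & (align - 1)) == 0);
	return x & (align - 1);
}

int main(void)
{
	for (uint64_t x = 0; x < 4096; x++)
		assert(mod_pow2(x, 64) == x % 64);
	return 0;
}
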


@ -121,23 +121,6 @@ static unsigned long get_dram_base(void)
return membase;
}
/*
* This function handles the architecture specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
* must be reserved. On failure it is required to free all allocations
* it has made.
*/
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
unsigned long dram_base,
efi_loaded_image_t *image);
asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
unsigned long fdt_addr,
unsigned long fdt_size);
/*
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
* that is described in the PE/COFF header. Most of the code is the same


@ -776,6 +776,22 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_size,
unsigned long soft_limit,
unsigned long hard_limit);
/*
* This function handles the architecture specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
* must be reserved. On failure it is required to free all allocations
* it has made.
*/
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
unsigned long dram_base,
efi_loaded_image_t *image);
asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
unsigned long fdt_addr,
unsigned long fdt_size);
void efi_handle_post_ebs_state(void);


@ -8,6 +8,7 @@
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <asm/efi.h>
#include <asm/e820/types.h>
@ -361,8 +362,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
int options_size = 0;
efi_status_t status;
char *cmdline_ptr;
unsigned long ramdisk_addr;
unsigned long ramdisk_size;
efi_system_table = sys_table_arg;
@ -390,8 +389,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr = &boot_params->hdr;
/* Copy the second sector to boot_params */
memcpy(&hdr->jump, image_base + 512, 512);
/* Copy the setup header from the second sector to boot_params */
memcpy(&hdr->jump, image_base + 512,
sizeof(struct setup_header) - offsetof(struct setup_header, jump));
/*
* Fill out some of the header fields ourselves because the


@ -1229,3 +1229,9 @@ out:
return rv;
}
EXPORT_SYMBOL_GPL(efivars_unregister);
int efivar_supports_writes(void)
{
return __efivars && __efivars->ops->set_variable;
}
EXPORT_SYMBOL_GPL(efivar_supports_writes);


@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
if (err)
goto err_register;
if (err) {
kobject_put(&entry->kobj);
return err;
}
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
err_register:
kfree(entry);
return err;
}


@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
* on this port and minimum soft reset pulse width has elapsed.
* Driver polls port_soft_reset_ack to determine if reset done by HW.
*/
if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;


@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
struct dfl_fpga_cdev *cdev = drvdata->cdev;
int ret = 0;
if (!num_vfs) {
/*
@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
dfl_fpga_cdev_config_ports_pf(cdev);
} else {
int ret;
/*
* before enable SRIOV, put released ports into VF access mode
* first of all.


@ -707,9 +707,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;
memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;


@ -8759,20 +8759,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* the same resource. If we have a new DC context as part of
* the DM atomic state from validation we need to free it and
* retain the existing one instead.
*
* Furthermore, since the DM atomic state only contains the DC
* context and can safely be annulled, we can free the state
* and clear the associated private object now to free
* some memory and avoid a possible use-after-free later.
*/
struct dm_atomic_state *new_dm_state, *old_dm_state;
new_dm_state = dm_atomic_get_new_state(state);
old_dm_state = dm_atomic_get_old_state(state);
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
if (new_dm_state && old_dm_state) {
if (new_dm_state->context)
dc_release_state(new_dm_state->context);
if (obj->funcs == adev->dm.atomic_obj.funcs) {
int j = state->num_private_objs-1;
new_dm_state->context = old_dm_state->context;
dm_atomic_destroy_state(obj,
state->private_objs[i].state);
if (old_dm_state->context)
dc_retain_state(old_dm_state->context);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
* before the array can safely be truncated.
*/
if (i != j)
state->private_objs[i] =
state->private_objs[j];
state->private_objs[j].ptr = NULL;
state->private_objs[j].state = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
state->num_private_objs = j;
break;
}
}
}


@ -151,6 +151,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;


@ -1283,6 +1283,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);


@ -918,11 +918,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
struct drm_panel *panel;
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}
ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)


@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
memcpy(dst, src, len);
if (!fb_helper->dev->mode_config.fbdev_use_iomem)
memcpy(dst, src, len);
else
memcpy_toio((void __iomem *)dst, src, len);
src += fb->pitches[0];
dst += fb->pitches[0];
}


@ -879,9 +879,6 @@ err:
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@ -906,14 +903,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_put(obj);
if (ret)
return ret;
goto err;
args->handle = handle;
args->size = obj->size;
return 0;
err:
drm_gem_object_put(obj);
return ret;
}
/**


@ -918,7 +918,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
}
}
tr.len = chunk;
tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);


@ -331,10 +331,8 @@ static int drm_of_lvds_get_remote_pixels_type(
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
if (!current_pt || pixels_type != current_pt) {
of_node_put(remote_port);
if (!current_pt || pixels_type != current_pt)
return -EINVAL;
}
}
return pixels_type;


@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip)
int lima_pp_bcast_resume(struct lima_ip *ip)
{
/* PP has been reset by individual PP resume */
ip->data.async_reset = false;
return 0;
}


@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
if (!mcde->video_mode)
/* Send a single frame using software sync */
mcde_display_send_one_frame(mcde);
if (!mcde->video_mode) {
/*
* Send a single frame using software sync if the flow
* is not active yet.
*/
if (mcde->flow_active == 0)
mcde_display_send_one_frame(mcde);
}
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*


@ -2157,7 +2157,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
core->func->update(core, interlock, false);
nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
@ -2613,7 +2613,7 @@ nv50_display_create(struct drm_device *dev)
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (disp->disp->object.oclass >= GF110_DISP)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;


@ -139,6 +139,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
uint32_t *tile_mode,
uint8_t *kind)
{
struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);
if (modifier == DRM_FORMAT_MOD_LINEAR) {
@ -150,6 +151,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/
if ((modifier & (0xffull << 12)) == 0ull) {
/* Legacy modifier. Translate to this dev's 'kind.' */
modifier |= disp->format_modifiers[0] & (0xffull << 12);
}
*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);
@ -175,6 +182,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
}
}
static const u64 legacy_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
DRM_FORMAT_MOD_INVALID
};
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
@ -195,8 +212,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
(disp->format_modifiers[mod] != modifier);
mod++);
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
for (mod = 0;
(legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
(legacy_modifiers[mod] != modifier);
mod++);
if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
}
nouveau_decode_mod(drm, modifier, tile_mode, kind);

View File

@ -317,7 +317,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@ -592,6 +592,7 @@ fini:
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
drm->fbcon = NULL;
return ret;
}

View File

@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
{
struct nvkm_ior *ior;
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
!ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
/*XXX: For various complicated reasons, we can't outright switch
* the boot-time OR on the first modeset without some fairly
* invasive changes.
*
* The systems that were fixed by modifying the OR selection
* code to account for HDA support shouldn't regress here as
* the HDA-enabled ORs match the relevant output's pad macro
* index, and the firmware seems to select an OR this way.
*
* This warning is to make it obvious if that proves wrong.
*/
WARN_ON(hda && !ior->func->hda.hpd);
return nvkm_outp_acquire_ior(outp, user, ior);
}
}
/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/

View File

@ -614,9 +614,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
.hsync_start = 1200 + 80,
.hsync_end = 1200 + 80 + 24,
.htotal = 1200 + 80 + 24 + 36,
.hsync_start = 1200 + 60,
.hsync_end = 1200 + 60 + 24,
.htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,

View File

@ -1260,7 +1260,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
.height = 165,
},
.delay = {
.hpd_absent_delay = 200,
/*
* When power is first given to the panel there's a short
* spike on the HPD line. It was explained that this spike
* was until the TCON data download was complete. On
* one system this was measured at 8 ms. We'll put 15 ms
* in the prepare delay just to be safe and take it away
* from the hpd_absent_delay (which would otherwise be 200 ms)
* to handle this. That means:
* - If HPD isn't hooked up you still have 200 ms delay.
* - If HPD is hooked up we won't try to look at it for the
* first 15 ms.
*/
.prepare = 15,
.hpd_absent_delay = 185,
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,

View File

@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (reg & SUN4I_HDMI_HPD_HIGH) {
if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}

View File

@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
/* Read data if receive data valid is set */
while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
CDNS_I2C_SR_RXDV) {
/*
* Clear hold bit that was set for FIFO control if
* RX data left is less than FIFO depth, unless
* repeated start is selected.
*/
if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
if (id->recv_count > 0) {
*(id->p_recv_buf)++ =
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
id->recv_count--;
id->curr_recv_count--;
/*
* Clear hold bit that was set for FIFO control
* if RX data left is less than or equal to
* FIFO DEPTH unless repeated start is selected
*/
if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
} else {
dev_err(id->adap.dev.parent,
"xfer_size reg rollover. xfer aborted!\n");
@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
if (id->send_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */

View File

@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_READ, m_param);
if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_READ, m_param);
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);

View File

@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {

View File

@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
if (!client || !slave_cb) {
WARN(1, "insufficient data\n");
if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
}
if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;
if (IS_ERR_OR_NULL(client))
return -EINVAL;
if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;

View File

@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
spin_unlock_irq(&cm.lock);
return 0;
}

View File

@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}
static void rdma_dim_destroy(struct ib_cq *cq)
{
if (!cq->dim)
return;
cancel_work_sync(&cq->dim->work);
kfree(cq->dim);
}
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
int rc;
@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
return cq;
out_destroy_cq:
rdma_dim_destroy(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}
rdma_dim_destroy(cq);
trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
if (cq->dim)
cancel_work_sync(&cq->dim->work);
kfree(cq->dim);
kfree(cq->wc);
kfree(cq);
}

View File

@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = attrs->ufile;
/* alloc_commit consumes the uobj kref */
uobj->uapi_object->type_class->alloc_commit(uobj);
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
spin_lock_irq(&ufile->uobjects_lock);
@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* matches atomic_set(-1) in alloc_uobj */
atomic_set(&uobj->usecnt, 0);
/* alloc_commit consumes the uobj kref */
uobj->uapi_object->type_class->alloc_commit(uobj);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
}

View File

@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
if (in_len < offsetofend(typeof(cmd), reserved))
return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
if (in_len < offsetofend(typeof(cmd), reserved))
return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;

Some files were not shown because too many files have changed in this diff Show More