Linux 4.14-rc6

-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJZ7clWAAoJEHm+PkMAQRiG07AH/iKcej+AsurISHx6i/LUEDC1
 a9wo5HAR5kEj+ohdE3JSkD9BHLcyhcCXaqIk9yOrwi9xv1DrPv8U/nGkKzZJzFi2
 mGWK09Zgi+vgSpA+YSErgl05IVGtgaryQQPqQdawpyRpqTUwP0+2pLnKEnJe0f05
 fpv+S4bDKUCuE8GcVNjF9gxXDg8j60fFa+oAcn7QPS6dCun/H6TbDRue5oeky0Y+
 50ZYjjioy9S9DIm2VF7pktMCP/mK/fgb+Q+4Up09VJGHGhq+891SRJ27yDulxo47
 /gq22SRIGBX2PGNllSwhYslgaCRRlYTMBYOIWrBreanA4NpGD662dp+GgWhD154=
 =TAMw
 -----END PGP SIGNATURE-----

Merge tag 'v4.14-rc6' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2017-10-24 13:17:20 +02:00
commit 9babb091e0
479 changed files with 5188 additions and 2350 deletions


@ -14,3 +14,11 @@ Description:
Show or set the gain boost of the amp, from 0-31 range.
18 = indoors (default)
14 = outdoors
What /sys/bus/iio/devices/iio:deviceX/noise_level_tripped
Date: May 2017
KernelVersion: 4.13
Contact: Matt Ranostay <matt.ranostay@konsulko.com>
Description:
When 1, the noise level is over the trip level and the sensor
is not reporting valid data
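
For illustration, a minimal user-space sketch that polls this attribute; the
iio:device0 path is hypothetical (the X in iio:deviceX is system dependent):

#include <stdio.h>

int main(void)
{
        /* hypothetical device instance; adjust iio:deviceX for your system */
        FILE *f = fopen("/sys/bus/iio/devices/iio:device0/noise_level_tripped", "r");
        int tripped;

        if (f && fscanf(f, "%d", &tripped) == 1)
                printf("noise tripped: %d\n", tripped); /* 1: readings invalid */
        if (f)
                fclose(f);
        return 0;
}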


@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
still used for tmpfs and other such users. If set to
false, the global swap readahead algorithm will be
used for all swappable pages.
What: /sys/kernel/mm/swap/vma_ra_max_order
Date: August 2017
Contact: Linux memory management mailing list <linux-mm@kvack.org>
Description: The maximum readahead size, as a page order, for VMA based swap readahead
The VMA based swap readahead algorithm will read ahead at
most 1 << max_order pages for each readahead. The
actual readahead size for each readahead is scaled down
from this limit by the estimation algorithm.
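
For instance, with max_order = 3 one readahead batch covers at most eight
pages; a trivial stand-alone sketch of the bound (not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned int max_order = 3;             /* example value */
        unsigned long max_pages = 1UL << max_order;

        printf("at most %lu pages per readahead\n", max_pages); /* 8 */
        return 0;
}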


@ -352,44 +352,30 @@ Read-Copy Update (RCU)
----------------------
.. kernel-doc:: include/linux/rcupdate.h
:external:
.. kernel-doc:: include/linux/rcupdate_wait.h
:external:
.. kernel-doc:: include/linux/rcutree.h
:external:
.. kernel-doc:: kernel/rcu/tree.c
:external:
.. kernel-doc:: kernel/rcu/tree_plugin.h
:external:
.. kernel-doc:: kernel/rcu/tree_exp.h
:external:
.. kernel-doc:: kernel/rcu/update.c
:external:
.. kernel-doc:: include/linux/srcu.h
:external:
.. kernel-doc:: kernel/rcu/srcutree.c
:external:
.. kernel-doc:: include/linux/rculist_bl.h
:external:
.. kernel-doc:: include/linux/rculist.h
:external:
.. kernel-doc:: include/linux/rculist_nulls.h
:external:
.. kernel-doc:: include/linux/rcu_sync.h
:external:
.. kernel-doc:: kernel/rcu/sync.c
:external:


@ -16,6 +16,10 @@ Optional properties:
- ams,tuning-capacitor-pf: Calibration tuning capacitor stepping
value 0 - 120pF. This will require using the calibration data from
the manufacturer.
- ams,nflwdth: Set the noise and watchdog threshold register on
startup. This will need to be set according to the noise from the
MCU board, and possibly the local environment. Refer to the
datasheet for the threshold settings.
Example:
@ -27,4 +31,5 @@ as3935@0 {
interrupt-parent = <&gpio1>;
interrupts = <16 1>;
ams,tuning-capacitor-pf = <80>;
ams,nflwdth = <0x44>;
};


@ -99,7 +99,7 @@ Examples:
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
reg = <0x0 0x2c200000 0 0x200000>;
reg = <0x0 0x2c200000 0 0x20000>;
};
};
@ -124,14 +124,14 @@ Examples:
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
reg = <0x0 0x2c200000 0 0x200000>;
reg = <0x0 0x2c200000 0 0x20000>;
};
gic-its@2c400000 {
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
reg = <0x0 0x2c400000 0 0x200000>;
reg = <0x0 0x2c400000 0 0x20000>;
};
ppi-partitions {


@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
submitting-patches
coding-style
email-clients
kernel-enforcement-statement
Other guides to the community that are of interest to most developers are:


@ -0,0 +1,147 @@
Linux Kernel Enforcement Statement
----------------------------------
As developers of the Linux kernel, we have a keen interest in how our software
is used and how the license for our software is enforced. Compliance with the
reciprocal sharing obligations of GPL-2.0 is critical to the long-term
sustainability of our software and community.
Although there is a right to enforce the separate copyright interests in the
contributions made to our community, we share an interest in ensuring that
individual enforcement actions are conducted in a manner that benefits our
community and do not have an unintended negative impact on the health and
growth of our software ecosystem. In order to deter unhelpful enforcement
actions, we agree that it is in the best interests of our development
community to undertake the following commitment to users of the Linux kernel
on behalf of ourselves and any successors to our copyright interests:
Notwithstanding the termination provisions of the GPL-2.0, we agree that
it is in the best interests of our development community to adopt the
following provisions of GPL-3.0 as additional permissions under our
license with respect to any non-defensive assertion of rights under the
license.
However, if you cease all violation of this License, then your license
from a particular copyright holder is reinstated (a) provisionally,
unless and until the copyright holder explicitly and finally
terminates your license, and (b) permanently, if the copyright holder
fails to notify you of the violation by some reasonable means prior to
60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Our intent in providing these assurances is to encourage more use of the
software. We want companies and individuals to use, modify and distribute
this software. We want to work with users in an open and transparent way to
eliminate any uncertainty about our expectations regarding compliance or
enforcement that might limit adoption of our software. We view legal action
as a last resort, to be initiated only when other community efforts have
failed to resolve the problem.
Finally, once a non-compliance issue is resolved, we hope the user will feel
welcome to join us in our efforts on this project. Working together, we will
be stronger.
Except where noted below, we speak only for ourselves, and not for any company
we might work for today, have in the past, or will in the future.
- Bjorn Andersson (Linaro)
- Andrea Arcangeli (Red Hat)
- Neil Armstrong
- Jens Axboe
- Pablo Neira Ayuso
- Khalid Aziz
- Ralf Baechle
- Felipe Balbi
- Arnd Bergmann
- Ard Biesheuvel
- Paolo Bonzini (Red Hat)
- Christian Borntraeger
- Mark Brown (Linaro)
- Paul Burton
- Javier Martinez Canillas
- Rob Clark
- Jonathan Corbet
- Vivien Didelot (Savoir-faire Linux)
- Hans de Goede (Red Hat)
- Mel Gorman (SUSE)
- Sven Eckelmann
- Alex Elder (Linaro)
- Fabio Estevam
- Larry Finger
- Bhumika Goyal
- Andy Gross
- Juergen Gross
- Shawn Guo
- Ulf Hansson
- Tejun Heo
- Rob Herring
- Masami Hiramatsu
- Michal Hocko
- Simon Horman
- Johan Hovold (Hovold Consulting AB)
- Christophe JAILLET
- Olof Johansson
- Lee Jones (Linaro)
- Heiner Kallweit
- Srinivas Kandagatla
- Jan Kara
- Shuah Khan (Samsung)
- David Kershner
- Jaegeuk Kim
- Namhyung Kim
- Colin Ian King
- Jeff Kirsher
- Greg Kroah-Hartman (Linux Foundation)
- Christian König
- Vinod Koul
- Krzysztof Kozlowski
- Viresh Kumar
- Aneesh Kumar K.V
- Julia Lawall
- Doug Ledford (Red Hat)
- Chuck Lever (Oracle)
- Daniel Lezcano
- Shaohua Li
- Xin Long (Red Hat)
- Tony Luck
- Mike Marshall
- Chris Mason
- Paul E. McKenney
- David S. Miller
- Ingo Molnar
- Kuninori Morimoto
- Borislav Petkov
- Jiri Pirko
- Josh Poimboeuf
- Sebastian Reichel (Collabora)
- Guenter Roeck
- Joerg Roedel
- Leon Romanovsky
- Steven Rostedt (VMware)
- Ivan Safonov
- Anna Schumaker
- Jes Sorensen
- K.Y. Srinivasan
- Heiko Stuebner
- Jiri Kosina (SUSE)
- Dmitry Torokhov
- Linus Torvalds
- Thierry Reding
- Rik van Riel
- Geert Uytterhoeven (Glider bvba)
- Daniel Vetter
- Linus Walleij
- Richard Weinberger
- Dan Williams
- Rafael J. Wysocki
- Arvind Yadav
- Masahiro Yamada
- Wei Yongjun
- Lv Zheng


@ -5346,9 +5346,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: include/linux/fcntl.h
F: include/linux/fs.h
F: include/uapi/linux/fcntl.h
F: include/uapi/linux/fs.h
F: fs/fcntl.c
F: fs/locks.c
@ -5357,6 +5355,8 @@ M: Alexander Viro <viro@zeniv.linux.org.uk>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/*
F: include/linux/fs.h
F: include/uapi/linux/fs.h
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
@ -7571,7 +7571,7 @@ F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
M: Alexander Graf <agraf@suse.com>
M: Paul Mackerras <paulus@ozlabs.org>
L: kvm-ppc@vger.kernel.org
W: http://www.linux-kvm.org/
T: git git://github.com/agraf/linux-2.6.git
@ -9213,7 +9213,6 @@ F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
M: Bin Liu <b-liu@ti.com>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/musb/
@ -10560,6 +10559,8 @@ M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
R: Jiri Olsa <jolsa@redhat.com>
R: Namhyung Kim <namhyung@kernel.org>
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
$(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
ifdef CONFIG_ORC_UNWINDER
$(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
else
$(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
endif
SKIP_STACK_VALIDATION := 1
export SKIP_STACK_VALIDATION
endif


@ -131,7 +131,7 @@ endif
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__
CHECKFLAGS += -D__arm__ -m32
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o


@ -23,7 +23,11 @@ ENTRY(putc)
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab )
#else
THUMB( svc #0xab )
#endif
mov pc, lr
.align 2
1: .word _GLOBAL_OFFSET_TABLE_ - .


@ -178,7 +178,7 @@
};
i2c0: i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
reg = <0x11000 0x20>;
#address-cells = <1>;
#size-cells = <0>;
@ -189,7 +189,7 @@
};
i2c1: i2c@11100 {
compatible = "marvell,mv64xxx-i2c";
compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
reg = <0x11100 0x20>;
#address-cells = <1>;
#size-cells = <0>;


@ -67,8 +67,8 @@
pinctrl-0 = <&pinctrl_macb0_default>;
phy-mode = "rmii";
ethernet-phy@1 {
reg = <0x1>;
ethernet-phy@0 {
reg = <0x0>;
interrupt-parent = <&pioA>;
interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";


@ -309,7 +309,7 @@
vddana-supply = <&vdd_3v3_lp_reg>;
vref-supply = <&vdd_3v3_lp_reg>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_adc_default>;
pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
status = "okay";
};
@ -340,6 +340,20 @@
bias-disable;
};
/*
* The ADTRG pin can work on any edge type.
* Here it is pulled up, so it needs to be connected
* to ground to produce an edge. The trigger can be
* configured on the falling, rising or either edge,
* and the pull-up can be changed to a pull-down or
* left floating according to need.
*/
pinctrl_adtrg_default: adtrg_default {
pinmux = <PIN_PD31__ADTRG>;
bias-pull-up;
};
pinctrl_charger_chglev: charger_chglev {
pinmux = <PIN_PA12__GPIO>;
bias-disable;


@ -18,12 +18,9 @@
compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
model = "Raspberry Pi Zero W";
/* Needed by firmware to properly init UARTs */
aliases {
uart0 = "/soc/serial@7e201000";
uart1 = "/soc/serial@7e215040";
serial0 = "/soc/serial@7e201000";
serial1 = "/soc/serial@7e215040";
chosen {
/* 8250 auxiliary UART instead of pl011 */
stdout-path = "serial1:115200n8";
};
leds {


@ -8,6 +8,11 @@
compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
model = "Raspberry Pi 3 Model B";
chosen {
/* 8250 auxiliary UART instead of pl011 */
stdout-path = "serial1:115200n8";
};
memory {
reg = <0 0x40000000>;
};


@ -20,8 +20,13 @@
#address-cells = <1>;
#size-cells = <1>;
aliases {
serial0 = &uart0;
serial1 = &uart1;
};
chosen {
bootargs = "earlyprintk console=ttyAMA0";
stdout-path = "serial0:115200n8";
};
thermal-zones {


@ -145,11 +145,12 @@
};
watchdog@41000000 {
compatible = "cortina,gemini-watchdog";
compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
reg = <0x41000000 0x1000>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
resets = <&syscon GEMINI_RESET_WDOG>;
clocks = <&syscon GEMINI_CLK_APB>;
clock-names = "PCLK";
};
uart0: serial@42000000 {


@ -144,10 +144,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
<&clks IMX7D_PCIE_PHY_ROOT_CLK>;


@ -87,9 +87,10 @@
};
watchdog: watchdog@98500000 {
compatible = "moxa,moxart-watchdog";
compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
reg = <0x98500000 0x10>;
clocks = <&clk_apb>;
clock-names = "PCLK";
};
sdhci: sdhci@98e00000 {


@ -1430,6 +1430,7 @@
atmel,min-sample-rate-hz = <200000>;
atmel,max-sample-rate-hz = <20000000>;
atmel,startup-time-ms = <4>;
atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};


@ -311,8 +311,8 @@
#size-cells = <0>;
reg = <0>;
tcon1_in_drc1: endpoint@0 {
reg = <0>;
tcon1_in_drc1: endpoint@1 {
reg = <1>;
remote-endpoint = <&drc1_out_tcon1>;
};
};
@ -1012,8 +1012,8 @@
#size-cells = <0>;
reg = <1>;
be1_out_drc1: endpoint@0 {
reg = <0>;
be1_out_drc1: endpoint@1 {
reg = <1>;
remote-endpoint = <&drc1_in_be1>;
};
};
@ -1042,8 +1042,8 @@
#size-cells = <0>;
reg = <0>;
drc1_in_be1: endpoint@0 {
reg = <0>;
drc1_in_be1: endpoint@1 {
reg = <1>;
remote-endpoint = <&be1_out_drc1>;
};
};
@ -1053,8 +1053,8 @@
#size-cells = <0>;
reg = <1>;
drc1_out_tcon1: endpoint@0 {
reg = <0>;
drc1_out_tcon1: endpoint@1 {
reg = <1>;
remote-endpoint = <&tcon1_in_drc1>;
};
};


@ -115,7 +115,11 @@ ENTRY(printascii)
mov r1, r0
mov r0, #0x04 @ SYS_WRITE0
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab )
#else
THUMB( svc #0xab )
#endif
ret lr
ENDPROC(printascii)
@ -124,7 +128,11 @@ ENTRY(printch)
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
#ifdef CONFIG_CPU_V7M
THUMB( bkpt #0xab )
#else
THUMB( svc #0xab )
#endif
ret lr
ENDPROC(printch)


@ -32,6 +32,7 @@
#include <asm/mach/arch.h>
#include "db8500-regs.h"
#include "pm_domains.h"
static int __init ux500_l2x0_unlock(void)
{
@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
static void __init u8500_init_machine(void)
{
/* Initialize ux500 power domains */
ux500_pm_domains_init();
/* automatically probe child nodes of dbx5x0 devices */
if (of_machine_is_compatible("st-ericsson,u8540"))
of_platform_populate(NULL, u8500_local_bus_nodes,


@ -19,7 +19,6 @@
#include <linux/of_address.h>
#include "db8500-regs.h"
#include "pm_domains.h"
/* ARM WFI Standby signal register */
#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130)
@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
/* Set up ux500 suspend callbacks. */
suspend_set_ops(UX500_SUSPEND_OPS);
/* Initialize ux500 power domains */
ux500_pm_domains_init();
}


@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
* reserved here.
*/
#endif
/*
* In any case, always ensure address 0 is never used as many things
* get very confused if 0 is returned as a legitimate address.
*/
memblock_reserve(0, 1);
}
void __init adjust_lowmem_bounds(void)


@ -61,13 +61,6 @@
chosen {
stdout-path = "serial0:115200n8";
};
reg_vcc3v3: vcc3v3 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
};
&ehci0 {
@ -91,7 +84,7 @@
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_vcc3v3>;
vmmc-supply = <&reg_dcdc1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
cd-inverted;
disable-wp;


@ -336,7 +336,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cpm_clk 1 13>;
@ -362,7 +362,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
@ -389,7 +389,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;


@ -335,7 +335,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cps_clk 1 13>;
@ -361,7 +361,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
@ -388,7 +388,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;


@ -62,6 +62,7 @@
brightness-levels = <256 128 64 16 8 4 0>;
default-brightness-level = <6>;
power-supply = <&reg_12v>;
enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
};
@ -83,6 +84,15 @@
regulator-always-on;
};
reg_12v: regulator2 {
compatible = "regulator-fixed";
regulator-name = "fixed-12V";
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
regulator-boot-on;
regulator-always-on;
};
rsnd_ak4613: sound {
compatible = "simple-audio-card";


@ -582,7 +582,7 @@
vop_mmu: iommu@ff373f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff373f00 0x0 0x100>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vop_mmu";
#iommu-cells = <0>;
status = "disabled";


@ -740,7 +740,7 @@
iep_mmu: iommu@ff900800 {
compatible = "rockchip,iommu";
reg = <0x0 0xff900800 0x0 0x100>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "iep_mmu";
#iommu-cells = <0>;
status = "disabled";


@ -371,10 +371,10 @@
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-max-microvolt = <3000000>;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <3300000>;
regulator-suspend-microvolt = <3000000>;
};
};


@ -325,12 +325,12 @@
vcc_sd: LDO_REG4 {
regulator-name = "vcc_sd";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-max-microvolt = <3000000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <3300000>;
regulator-suspend-microvolt = <3000000>;
};
};


@ -315,10 +315,10 @@
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-max-microvolt = <3000000>;
regulator-state-mem {
regulator-on-in-suspend;
regulator-suspend-microvolt = <3300000>;
regulator-suspend-microvolt = <3000000>;
};
};


@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return __cmpxchg_small(ptr, old, new, size);
case 4:
return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
(u32)old, new);
case 8:
/* lld/scd are only available for MIPS64 */
if (!IS_ENABLED(CONFIG_64BIT))
return __cmpxchg_called_with_bad_pointer();
return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
(u64)old, new);
default:
return __cmpxchg_called_with_bad_pointer();
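
The casts matter because ll on MIPS64 sign-extends the loaded 32-bit word, so
comparing it against an untruncated 64-bit old fails whenever bit 31 is set.
A user-space sketch of the mismatch, assuming a 64-bit build (this models the
assumed semantics, it is not the kernel routine):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mem = 0x80000000u;             /* stored value, bit 31 set */
        unsigned long old = 0x80000000ul;       /* caller's 'old' argument */

        /* 'll' sign-extends the 32-bit load into a 64-bit register */
        long loaded = (int32_t)mem;             /* 0xffffffff80000000 */

        printf("no cast:   %d\n", (unsigned long)loaded == old);       /* 0 */
        printf("with cast: %d\n", (uint32_t)loaded == (uint32_t)old);  /* 1 */
        return 0;
}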


@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
}
static struct plat_stmmacenet_data ls1x_eth0_pdata = {
.bus_id = 0,
.phy_addr = -1,
.bus_id = 0,
.phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
.interface = PHY_INTERFACE_MODE_MII,
.interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
.interface = PHY_INTERFACE_MODE_RMII,
.interface = PHY_INTERFACE_MODE_RMII,
#endif
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
.init = ls1x_eth_mux_init,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
.rx_queues_to_use = 1,
.tx_queues_to_use = 1,
.init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth0_resources[] = {
@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
#ifdef CONFIG_LOONGSON1_LS1B
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
.bus_id = 1,
.phy_addr = -1,
.interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
.init = ls1x_eth_mux_init,
.bus_id = 1,
.phy_addr = -1,
.interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
.rx_queues_to_use = 1,
.tx_queues_to_use = 1,
.init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth1_resources[] = {


@ -2558,7 +2558,6 @@ dcopuop:
break;
default:
/* Reserved R6 ops */
pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
return SIGILL;
}
}
@ -2719,7 +2718,6 @@ dcopuop:
break;
default:
/* Reserved R6 ops */
pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
return SIGILL;
}
}


@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
{
int src, dst, r, td, ts, mem_off, b_off;
bool need_swap, did_move, cmp_eq;
unsigned int target;
unsigned int target = 0;
u64 t64;
s64 t64s;
int bpf_op = BPF_OP(insn->code);


@ -30,8 +30,6 @@ cfg="$4"
boards_origin="$5"
shift 5
cd "${srctree}"
# Only print Skipping... lines if the user explicitly specified BOARDS=. In the
# general case it only serves to obscure the useful output about what actually
# was included.
@ -48,7 +46,7 @@ environment*)
esac
for board in $@; do
board_cfg="arch/mips/configs/generic/board-${board}.config"
board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
if [ ! -f "${board_cfg}" ]; then
echo "WARNING: Board config '${board_cfg}' not found"
continue
@ -84,7 +82,7 @@ for board in $@; do
done || continue
# Merge this board config fragment into our final config file
./scripts/kconfig/merge_config.sh \
${srctree}/scripts/kconfig/merge_config.sh \
-m -O ${objtree} ${cfg} ${board_cfg} \
| grep -Ev '^(#|Using)'
done


@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(__xchg64);
EXPORT_SYMBOL(__cmpxchg_u64);
#endif
#include <linux/uaccess.h>


@ -742,7 +742,7 @@ lws_compare_and_swap_2:
10: ldd 0(%r25), %r25
11: ldd 0(%r24), %r24
#else
/* Load new value into r22/r23 - high/low */
/* Load old value into r22/r23 - high/low */
10: ldw 0(%r25), %r22
11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
@ -834,11 +834,11 @@ cas2_action:
copy %r0, %r28
#else
/* Compare first word */
19: ldw,ma 0(%r26), %r29
19: ldw 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
20: ldw,ma 4(%r26), %r29
20: ldw 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */


@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
for_each_online_cpu(cpu) {
if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
if (cpu == 0)
continue;
if ((cpu0_loc != 0) &&
(cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
continue;
clocksource_cr16.name = "cr16_unstable";


@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
* - we have no stack frame and can not allocate one
* - LR points back to the original caller (in A)
* - CTR holds the new NIP in C
* - r0 & r12 are free
*
* r0 can't be used as the base register for a DS-form load or store, so
* we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
* - r0, r11 & r12 are free
*/
livepatch_handler:
CURRENT_THREAD_INFO(r12, r1)
/* Save stack pointer into r0 */
mr r0, r1
/* Allocate 3 x 8 bytes */
ld r1, TI_livepatch_sp(r12)
addi r1, r1, 24
std r1, TI_livepatch_sp(r12)
ld r11, TI_livepatch_sp(r12)
addi r11, r11, 24
std r11, TI_livepatch_sp(r12)
/* Save toc & real LR on livepatch stack */
std r2, -24(r1)
std r2, -24(r11)
mflr r12
std r12, -16(r1)
std r12, -16(r11)
/* Store stack end marker */
lis r12, STACK_END_MAGIC@h
ori r12, r12, STACK_END_MAGIC@l
std r12, -8(r1)
/* Restore real stack pointer */
mr r1, r0
std r12, -8(r11)
/* Put ctr in r12 for global entry and branch there */
mfctr r12
@ -216,36 +207,30 @@ livepatch_handler:
/*
* Now we are returning from the patched function to the original
* caller A. We are free to use r0 and r12, and we can use r2 until we
* caller A. We are free to use r11, r12 and we can use r2 until we
* restore it.
*/
CURRENT_THREAD_INFO(r12, r1)
/* Save stack pointer into r0 */
mr r0, r1
ld r1, TI_livepatch_sp(r12)
ld r11, TI_livepatch_sp(r12)
/* Check stack marker hasn't been trashed */
lis r2, STACK_END_MAGIC@h
ori r2, r2, STACK_END_MAGIC@l
ld r12, -8(r1)
ld r12, -8(r11)
1: tdne r12, r2
EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
/* Restore LR & toc from livepatch stack */
ld r12, -16(r1)
ld r12, -16(r11)
mtlr r12
ld r2, -24(r1)
ld r2, -24(r11)
/* Pop livepatch stack frame */
CURRENT_THREAD_INFO(r12, r0)
subi r1, r1, 24
std r1, TI_livepatch_sp(r12)
/* Restore real stack pointer */
mr r1, r0
CURRENT_THREAD_INFO(r12, r1)
subi r11, r11, 24
std r11, TI_livepatch_sp(r12)
/* Return to original caller of live patched function */
blr


@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
* Logical instructions
*/
case 26: /* cntlzw */
op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
val = (unsigned int) regs->gpr[rd];
op->val = ( val ? __builtin_clz(val) : 32 );
goto logical_done;
#ifdef __powerpc64__
case 58: /* cntlzd */
op->val = __builtin_clzl(regs->gpr[rd]);
val = regs->gpr[rd];
op->val = ( val ? __builtin_clzl(val) : 64 );
goto logical_done;
#endif
case 28: /* and */
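
The guard above is needed because __builtin_clz() is undefined for a zero
argument, while the emulated cntlzw/cntlzd instructions must return the
operand width. A minimal stand-alone check of the 32-bit case:

#include <stdio.h>

static unsigned int cntlzw(unsigned int val)
{
        /* __builtin_clz(0) is undefined; cntlzw must yield 32 for 0 */
        return val ? __builtin_clz(val) : 32;
}

int main(void)
{
        printf("%u %u %u\n", cntlzw(0), cntlzw(1), cntlzw(0x80000000u));
        /* expected: 32 31 0 */
        return 0;
}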


@ -1438,7 +1438,6 @@ out:
int arch_update_cpu_topology(void)
{
lockdep_assert_cpus_held();
return numa_update_cpu_topology(true);
}


@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
/* Take the mutex lock for this node and then decrement the reference count */
mutex_lock(&ref->lock);
if (ref->refc == 0) {
/*
* This is true when a perf session is started and all CPUs
* in a given node are subsequently taken offline.
*
* In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
* sets ref->refc to zero if the CPU going offline is the last
* CPU in the node, and makes an OPAL call to disable the
* engine in that node.
*/
mutex_unlock(&ref->lock);
return;
}
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
/* We need only vbase for core counters */
mem_info->vbase = page_address(alloc_pages_node(phys_id,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
get_order(size)));
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
return -ENOMEM;
@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
return;
mutex_lock(&ref->lock);
if (ref->refc == 0) {
/*
* This is true when a perf session is started and all CPUs
* in a given core are subsequently taken offline.
*
* In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
* sets ref->refc to zero if the CPU going offline is the last
* CPU in the core, and makes an OPAL call to disable the
* engine in that core.
*/
mutex_unlock(&ref->lock);
return;
}
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
* free the memory in cpu offline path.
*/
local_mem = page_address(alloc_pages_node(phys_id,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
get_order(size)));
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!local_mem)
return -ENOMEM;
@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
}
/* Only free the attr_groups which are dynamically allocated */
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
kfree(pmu_ptr);
return;


@ -27,6 +27,7 @@ CONFIG_NET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_RAM=y
# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set


@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
lc->user_timer = tsk->thread.user_timer;
lc->guest_timer = tsk->thread.guest_timer;
lc->system_timer = tsk->thread.system_timer;
lc->hardirq_timer = tsk->thread.hardirq_timer;
lc->softirq_timer = tsk->thread.softirq_timer;
lc->steal_timer = 0;
}


@ -176,7 +176,7 @@
/*
* This is a sneaky trick to help the unwinder find pt_regs on the stack. The
* frame pointer is replaced with an encoded pointer to pt_regs. The encoding
* is just setting the LSB, which makes it an invalid stack address and is also
* is just clearing the MSB, which makes it an invalid stack address and is also
* a signal to the unwinder that it's a pt_regs pointer in disguise.
*
* NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@ -185,7 +185,7 @@
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
mov %esp, %ebp
orl $0x1, %ebp
andl $0x7fffffff, %ebp
#endif
.endm
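
A user-space sketch of the 32-bit round trip, using a hypothetical address
(the matching decoder appears in the unwinder hunk further below):

#include <stdint.h>
#include <stdio.h>

/* 32-bit kernel stack addresses have the top bit set, so a cleared MSB
 * marks the value as an encoded pt_regs pointer rather than a frame. */
static uint32_t encode_frame_pointer(uint32_t regs)
{
        return regs & 0x7fffffff;
}

static uint32_t decode_frame_pointer(uint32_t bp)
{
        if (bp & 0x80000000)
                return 0;                       /* ordinary frame pointer */
        return bp | 0x80000000;                 /* recover pt_regs address */
}

int main(void)
{
        uint32_t regs = 0xc1234560;             /* hypothetical kernel address */
        uint32_t bp = encode_frame_pointer(regs);

        printf("%#x -> %#x -> %#x\n", regs, bp, decode_frame_pointer(bp));
        return 0;
}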


@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
pmus[i].type = type;
pmus[i].boxes = kzalloc(size, GFP_KERNEL);
if (!pmus[i].boxes)
return -ENOMEM;
goto err;
}
type->pmus = pmus;
@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
sizeof(*attr_group), GFP_KERNEL);
if (!attr_group)
return -ENOMEM;
goto err;
attrs = (struct attribute **)(attr_group + 1);
attr_group->name = "events";
@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
}
type->pmu_group = &uncore_pmu_attr_group;
return 0;
err:
for (i = 0; i < type->num_boxes; i++)
kfree(pmus[i].boxes);
kfree(pmus);
return -ENOMEM;
}
static int __init


@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);
u32 hv_max_vp_index;
static int hv_cpu_init(unsigned int cpu)
{
u64 msr_vp_index;
@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
hv_vp_index[smp_processor_id()] = msr_vp_index;
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
return 0;
}


@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
static struct hv_flush_pcpu __percpu *pcpu_flush;
static struct hv_flush_pcpu __percpu **pcpu_flush;
static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
/* valid_bank_mask can represent up to 64 banks */
if (hv_max_vp_index / 64 >= 64)
return 0;
/*
* Clear all banks up to the maximum possible bank, as hv_flush_pcpu_ex
* structs are not cleared between calls; otherwise we risk flushing
* unneeded vCPUs.
*/
for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
/*
* Some banks may end up being empty but this is acceptable.
*/
@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
vcpu = hv_cpu_number_to_vp_number(cpu);
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
/* valid_bank_mask can represent up to 64 banks */
if (vcpu_bank >= 64)
return 0;
__set_bit(vcpu_offset, (unsigned long *)
&flush->hv_vp_set.bank_contents[vcpu_bank]);
if (vcpu_bank >= nr_bank)
@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int cpu, vcpu, gva_n, max_gvas;
struct hv_flush_pcpu **flush_pcpu;
struct hv_flush_pcpu *flush;
u64 status = U64_MAX;
unsigned long flags;
@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
local_irq_save(flags);
flush = this_cpu_ptr(pcpu_flush);
flush_pcpu = this_cpu_ptr(pcpu_flush);
if (unlikely(!*flush_pcpu))
*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
flush = *flush_pcpu;
if (unlikely(!flush)) {
local_irq_restore(flags);
goto do_native;
}
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
struct hv_flush_pcpu_ex **flush_pcpu;
struct hv_flush_pcpu_ex *flush;
u64 status = U64_MAX;
unsigned long flags;
@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
local_irq_save(flags);
flush = this_cpu_ptr(pcpu_flush_ex);
flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
if (unlikely(!*flush_pcpu))
*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
flush = *flush_pcpu;
if (unlikely(!flush)) {
local_irq_restore(flags);
goto do_native;
}
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
0, nr_bank + 2, flush, NULL);
0, nr_bank, flush, NULL);
} else if (info->end &&
((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
0, nr_bank + 2, flush, NULL);
0, nr_bank, flush, NULL);
} else {
gva_n = fill_gva_list(flush->gva_list, nr_bank,
info->start, info->end);
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
gva_n, nr_bank + 2, flush, NULL);
gva_n, nr_bank, flush, NULL);
}
local_irq_restore(flags);
@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
return;
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
else
pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
}


@ -62,8 +62,10 @@
#define new_len2 145f-144f
/*
* max without conditionals. Idea adapted from:
* gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
* The additional "-" is needed because gas uses a "true" value of -1.
*/
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
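
In C, where a true comparison yields 1 rather than gas's -1, a single
negation builds the mask; a stand-alone sketch of the same branchless max:

#include <stdio.h>

static unsigned int alt_max_short(unsigned int a, unsigned int b)
{
        /* (a < b) is 1 in C, so -(a < b) is the all-ones mask when a < b;
         * gas evaluates (a < b) to -1, hence the extra '-' in the macro. */
        return a ^ ((a ^ b) & -(unsigned int)(a < b));
}

int main(void)
{
        printf("%u %u\n", alt_max_short(3, 7), alt_max_short(7, 3)); /* 7 7 */
        return 0;
}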


@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
alt_end_marker ":\n"
/*
* max without conditionals. Idea adapted from:
* gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
* The additional "-" is needed because gas works with s32s.
* The additional "-" is needed because gas uses a "true" value of -1.
*/
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is


@ -110,6 +110,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
#endif
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap


@ -187,7 +187,6 @@ struct mca_msr_regs {
extern struct mce_vendor_flags mce_flags;
extern struct mca_config mca_cfg;
extern struct mca_msr_regs msr_ops;
enum mce_notifier_prios {


@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
DEBUG_LOCKS_WARN_ON(preemptible());
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
cpumask_clear_cpu(cpu, mm_cpumask(mm));
}
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)


@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
* to this information.
*/
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.


@ -82,6 +82,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
static inline bool tlb_defer_switch_to_init_mm(void)
{
/*
* If we have PCID, then switching to init_mm is reasonably
* fast. If we don't have PCID, then switching to init_mm is
* quite slow, so we try to defer it in the hopes that we can
* avoid it entirely. The latter approach runs the risk of
* receiving otherwise unnecessary IPIs.
*
* This choice is just a heuristic. The tlb code can handle this
* function returning true or false regardless of whether we have
* PCID.
*/
return !static_cpu_has(X86_FEATURE_PCID);
}
/*
* 6 because 6 should be plenty and struct tlb_state will fit in
* two cache lines.
@ -104,6 +120,23 @@ struct tlb_state {
u16 loaded_mm_asid;
u16 next_asid;
/*
* We can be in one of several states:
*
* - Actively using an mm. Our CPU's bit will be set in
* mm_cpumask(loaded_mm) and is_lazy == false;
*
* - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
* will not be set in mm_cpumask(&init_mm) and is_lazy == false.
*
* - Lazily using a real mm. loaded_mm != &init_mm, our bit
* is set in mm_cpumask(loaded_mm), but is_lazy == true.
* We're heuristically guessing that the CR3 load we
* skipped more than makes up for the overhead added by
* lazy mode.
*/
bool is_lazy;
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
* disabling interrupts when modifying either one.


@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
return ~0U;
}
static u32 skx_deadline_rev(void)
{
switch (boot_cpu_data.x86_mask) {
case 0x03: return 0x01000136;
case 0x04: return 0x02000014;
}
return ~0U;
}
static const struct x86_cpu_id deadline_match[] = {
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014),
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
const struct x86_cpu_id *m;
u32 rev;
if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
m = x86_match_cpu(deadline_match);


@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last;
this_leaf = this_cpu_ci->info_list + index;
nshared = base->eax.split.num_threads_sharing + 1;
apicid = cpu_data(cpu).apicid;
first = apicid - (apicid % nshared);


@ -1,3 +1,6 @@
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__
#include <linux/device.h>
#include <asm/mce.h>
@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif
extern struct mca_config mca_cfg;
#endif /* __X86_MCE_INTERNAL_H__ */


@ -28,6 +28,8 @@
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
#include "mce-internal.h"
#define NR_BLOCKS 5
#define THRESHOLD_MAX 0xFFF
#define INT_TYPE_APIC 0x00020000


@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
bool *res = &dis_ucode_ldr;
#endif
if (!have_cpuid_p())
return *res;
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
void __init load_ucode_bsp(void)
{
unsigned int cpuid_1_eax;
bool intel = true;
if (check_loader_disabled_bsp())
if (!have_cpuid_p())
return;
cpuid_1_eax = native_cpuid_eax(1);
switch (x86_cpuid_vendor()) {
case X86_VENDOR_INTEL:
if (x86_family(cpuid_1_eax) >= 6)
load_ucode_intel_bsp();
if (x86_family(cpuid_1_eax) < 6)
return;
break;
case X86_VENDOR_AMD:
if (x86_family(cpuid_1_eax) >= 0x10)
load_ucode_amd_bsp(cpuid_1_eax);
if (x86_family(cpuid_1_eax) < 0x10)
return;
intel = false;
break;
default:
break;
return;
}
if (check_loader_disabled_bsp())
return;
if (intel)
load_ucode_intel_bsp();
else
load_ucode_amd_bsp(cpuid_1_eax);
}
static bool check_loader_disabled_ap(void)


@ -34,6 +34,7 @@
#include <linux/mm.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
return 0;
}
static bool is_blacklisted(unsigned int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
pr_err_once("late loading on model 79 is disabled.\n");
return true;
}
return false;
}
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
const struct firmware *firmware;
enum ucode_state ret;
if (is_blacklisted(cpu))
return UCODE_NFOUND;
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
if (is_blacklisted(cpu))
return UCODE_NFOUND;
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}


@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
cr4_init_shadow();
/* Make sure IDT is set up before any exception happens */
idt_setup_early_handler();
cr4_init_shadow();
sanitize_boot_params(&boot_params);
x86_early_init_platform_quirks();


@ -3,6 +3,15 @@
/* Kprobes and Optprobes common header */
#include <asm/asm.h>
#ifdef CONFIG_FRAME_POINTER
# define SAVE_RBP_STRING " push %" _ASM_BP "\n" \
" mov %" _ASM_SP ", %" _ASM_BP "\n"
#else
# define SAVE_RBP_STRING " push %" _ASM_BP "\n"
#endif
#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING \
/* Skip cs, ip, orig_ax. */ \
@ -17,7 +26,7 @@
" pushq %r10\n" \
" pushq %r11\n" \
" pushq %rbx\n" \
" pushq %rbp\n" \
SAVE_RBP_STRING \
" pushq %r12\n" \
" pushq %r13\n" \
" pushq %r14\n" \
@ -48,7 +57,7 @@
" pushl %es\n" \
" pushl %ds\n" \
" pushl %eax\n" \
" pushl %ebp\n" \
SAVE_RBP_STRING \
" pushl %edi\n" \
" pushl %esi\n" \
" pushl %edx\n" \


@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* raw stack chunk with redzones:
*/
__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
/*


@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
load_cr3(initial_page_table);
#else
write_cr3(real_mode_header->trampoline_pgd);
/* Exiting long mode will fail if CR4.PCIDE is set. */
if (static_cpu_has(X86_FEATURE_PCID))
cr4_clear_bits(X86_CR4_PCIDE);
#endif
/* Jump to the identity-mapped low memory code */


@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
state->stack_info.type, state->stack_info.next_sp,
state->stack_mask, state->graph_idx);
for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
break;
@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
* This determines if the frame pointer actually contains an encoded pointer to
* pt_regs on the stack. See ENCODE_FRAME_POINTER.
*/
#ifdef CONFIG_X86_64
static struct pt_regs *decode_frame_pointer(unsigned long *bp)
{
unsigned long regs = (unsigned long)bp;
@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
return (struct pt_regs *)(regs & ~0x1);
}
#else
static struct pt_regs *decode_frame_pointer(unsigned long *bp)
{
unsigned long regs = (unsigned long)bp;
if (regs & 0x80000000)
return NULL;
return (struct pt_regs *)(regs | 0x80000000);
}
#endif
#ifdef CONFIG_X86_32
#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
#else
#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
#endif
static bool update_stack_state(struct unwind_state *state,
unsigned long *next_bp)
@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
regs = decode_frame_pointer(next_bp);
if (regs) {
frame = (unsigned long *)regs;
len = regs_size(regs);
len = KERNEL_REGS_SIZE;
state->got_irq = true;
} else {
frame = next_bp;
@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
frame < prev_frame_end)
return false;
/*
* On 32-bit with user mode regs, make sure the last two regs are safe
* to access:
*/
if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
!on_stack(info, frame, len + 2*sizeof(long)))
return false;
/* Move state to the next frame: */
if (regs) {
state->regs = regs;
@ -328,6 +355,13 @@ bad_address:
state->regs->sp < (unsigned long)task_pt_regs(state->task))
goto the_end;
/*
* There are some known frame pointer issues on 32-bit. Disable
* unwinder warnings on 32-bit until it gets objtool support.
*/
if (IS_ENABLED(CONFIG_X86_32))
goto the_end;
if (state->regs) {
printk_deferred_once(KERN_WARNING
"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",


@ -3973,13 +3973,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
static inline bool is_last_gpte(struct kvm_mmu *mmu,
unsigned level, unsigned gpte)
{
/*
* PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
* iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
* level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
*/
gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
/*
* The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
* If it is clear, there are no large pages at this level, so clear
@ -3987,6 +3980,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
*/
gpte &= level - mmu->last_nonleaf_level;
/*
* PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
* iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
* level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
*/
gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
return gpte & PT_PAGE_SIZE_MASK;
}
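
The reordering matters: masking must happen before the terminal bit is
forced, or a level-1 walk can lose PT_PAGE_SIZE_MASK. A user-space sketch of
the fixed ordering (simplified; the real mmu state is assumed away):

#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1
#define PT_PAGE_SIZE_MASK   (1u << 7)

static unsigned int is_last_gpte(unsigned int level,
                                 unsigned int last_nonleaf_level,
                                 unsigned int gpte)
{
        gpte &= level - last_nonleaf_level;      /* may clear bit 7 */
        gpte |= level - PT_PAGE_TABLE_LEVEL - 1; /* level 1: -1 sets bit 7 */
        return gpte & PT_PAGE_SIZE_MASK;
}

int main(void)
{
        /* a level-1 walk terminates even after the mask cleared bit 7 */
        printf("%u\n", !!is_last_gpte(1, 1, 0));                 /* 1 */
        /* a level-2 large page where large pages are allowed */
        printf("%u\n", !!is_last_gpte(2, 3, PT_PAGE_SIZE_MASK)); /* 1 */
        return 0;
}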
@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
update_last_nonleaf_level(vcpu, context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}


@ -334,10 +334,11 @@ retry_walk:
--walker->level;
index = PT_INDEX(addr, walker->level);
table_gfn = gpte_to_gfn(pte);
offset = index * sizeof(pt_element_t);
pte_gpa = gfn_to_gpa(table_gfn) + offset;
BUG_ON(walker->level < 1);
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;


@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
vmx_set_cr4(vcpu, vmcs12->host_cr4);
nested_ept_uninit_mmu_context(vcpu);


@ -1,5 +1,12 @@
# Kernel does not boot with instrumentation of tlb.c.
KCOV_INSTRUMENT_tlb.o := n
# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
KCOV_INSTRUMENT_tlb.o := n
KCOV_INSTRUMENT_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o setup_nx.o tlb.o


@ -174,3 +174,15 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
return addr + count <= __pa(high_memory);
}
int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
{
phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
return valid_phys_addr_range(addr, count);
}


@ -30,6 +30,7 @@
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush)
{
@ -80,7 +81,7 @@ void leave_mm(int cpu)
return;
/* Warn if we're not lazy. */
WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
switch_mm(NULL, &init_mm, NULL);
}
@ -142,45 +143,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
__flush_tlb_all();
}
#endif
this_cpu_write(cpu_tlbstate.is_lazy, false);
if (real_prev == next) {
VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
next->context.ctx_id);
if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* There's nothing to do: we weren't lazy, and we
* aren't changing our mm. We don't need to flush
* anything, nor do we need to update CR3, CR4, or
* LDTR.
*/
return;
}
/* Resume remote flushes and then read tlb_gen. */
cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
next_tlb_gen) {
/*
* Ideally, we'd have a flush_tlb() variant that
* takes the known CR3 value as input. This would
* be faster on Xen PV and on hypothetical CPUs
* on which INVPCID is fast.
*/
this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
next_tlb_gen);
write_cr3(build_cr3(next, prev_asid));
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
TLB_FLUSH_ALL);
}
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
next->context.ctx_id);
/*
* We just exited lazy mode, which means that CR4 and/or LDTR
* may be stale. (Changes to the required CR4 and LDTR states
* are not reflected in tlb_gen.)
* We don't currently support having a real mm loaded without
* our cpu set in mm_cpumask(). We have all the bookkeeping
* in place to figure out whether we would need to flush
* if our cpu were cleared in mm_cpumask(), but we don't
* currently use it.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
return;
} else {
u16 new_asid;
bool need_flush;
@ -199,10 +179,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/* Stop remote flushes for the previous mm */
if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
real_prev != &init_mm);
cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
/*
* Start remote flushes and then read tlb_gen.
@ -232,6 +211,40 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
switch_ldt(real_prev, next);
}
/*
* Please ignore the name of this function. It should be called
* switch_to_kernel_thread().
*
* enter_lazy_tlb() is a hint from the scheduler that we are entering a
* kernel thread or other context without an mm. Acceptable implementations
* include doing nothing whatsoever, switching to init_mm, or various clever
* lazy tricks to try to minimize TLB flushes.
*
* The scheduler reserves the right to call enter_lazy_tlb() several times
* in a row. It will notify us that we're going back to a real mm by
* calling switch_mm_irqs_off().
*/
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
if (tlb_defer_switch_to_init_mm()) {
/*
* There's a significant optimization that may be possible
* here. We have accurate enough TLB flush tracking that we
* don't need to maintain coherence of TLB per se when we're
* lazy. We do, however, need to maintain coherence of
* paging-structure caches. We could, in principle, leave our
* old mm loaded and only switch to init_mm when
* tlb_remove_page() happens.
*/
this_cpu_write(cpu_tlbstate.is_lazy, true);
} else {
switch_mm(NULL, &init_mm, NULL);
}
}
/*
* Call this when reinitializing a CPU. It fixes the following potential
* problems:
@ -303,16 +316,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
if (unlikely(loaded_mm == &init_mm))
return;
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
if (this_cpu_read(cpu_tlbstate.is_lazy)) {
/*
* We're in lazy mode -- don't flush. We can get here on
* remote flushes due to races and on local flushes if a
* kernel thread coincidentally flushes the mm it's lazily
* still using.
* We're in lazy mode. We need to at least flush our
* paging-structure cache to avoid speculatively reading
* garbage into our TLB. Since switching to init_mm is barely
* slower than a minimal flush, just switch to init_mm.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
}
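
A note on the new flow: a CPU that enters a kernel thread keeps its old mm loaded and merely marks itself lazy; any flush request that arrives while lazy is satisfied by switching to init_mm rather than by flushing. A minimal userspace model of that state machine, with purely illustrative names (cpu_state, model_enter_lazy, model_flush_request), might look like this:

/* Illustrative userspace model of the lazy-TLB states above; these are
 * not kernel APIs. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct cpu_state {
        const char *loaded_mm;  /* which address space is loaded */
        bool is_lazy;           /* running a kernel thread */
};

/* enter_lazy_tlb(): defer the switch, keep the old mm loaded */
static void model_enter_lazy(struct cpu_state *c)
{
        if (strcmp(c->loaded_mm, "init_mm") == 0)
                return;         /* already clean, nothing to defer */
        c->is_lazy = true;
}

/* flush_tlb_func_common() lazy path: switching to init_mm is barely
 * slower than a minimal flush, so just switch */
static void model_flush_request(struct cpu_state *c)
{
        if (c->is_lazy) {
                c->loaded_mm = "init_mm";
                c->is_lazy = false;
                return;
        }
        printf("flush %s\n", c->loaded_mm);
}

int main(void)
{
        struct cpu_state c = { "task_mm", false };

        model_enter_lazy(&c);    /* scheduler runs a kernel thread */
        model_flush_request(&c); /* remote flush arrives while lazy */
        printf("loaded=%s lazy=%d\n", c.loaded_mm, c.is_lazy);
        return 0;
}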


@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
int rc;
rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
"x86/xen/hvm_guest:prepare",
"x86/xen/guest:prepare",
cpu_up_prepare_cb, cpu_dead_cb);
if (rc >= 0) {
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"x86/xen/hvm_guest:online",
"x86/xen/guest:online",
xen_cpu_up_online, NULL);
if (rc < 0)
cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);


@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
bmd->is_our_pages = map_data ? 0 : 1;
memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
iov_iter_init(&bmd->iter, iter->type, bmd->iov,
iter->nr_segs, iter->count);
bmd->iter = *iter;
bmd->iter.iov = bmd->iov;
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
int ret, offset;
struct iov_iter i;
struct iovec iov;
struct bio_vec *bvec;
iov_for_each(iov, i, *iter) {
unsigned long uaddr = (unsigned long) iov.iov_base;
@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
ret = get_user_pages_fast(uaddr, local_nr_pages,
(iter->type & WRITE) != WRITE,
&pages[cur_page]);
if (ret < local_nr_pages) {
if (unlikely(ret < local_nr_pages)) {
for (j = cur_page; j < page_limit; j++) {
if (!pages[j])
break;
put_page(pages[j]);
}
ret = -EFAULT;
goto out_unmap;
}
@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
offset = offset_in_page(uaddr);
for (j = cur_page; j < page_limit; j++) {
unsigned int bytes = PAGE_SIZE - offset;
unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (len <= 0)
break;
@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
bytes)
break;
/*
* check if the vector was merged with the previous
* one; if so, drop the now-extra page reference
*/
if (bio->bi_vcnt == prev_bi_vcnt)
put_page(pages[j]);
len -= bytes;
offset = 0;
}
@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
for (j = 0; j < nr_pages; j++) {
if (!pages[j])
break;
put_page(pages[j]);
bio_for_each_segment_all(bvec, bio, j) {
put_page(bvec->bv_page);
}
out:
kfree(pages);
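
The merge handling above is easy to get wrong, so a sketch of the reference accounting may help: get_user_pages_fast() takes a reference per page, and when bio_add_pc_page() merges the page into the previous bio_vec (bi_vcnt does not grow) the bio holds no new reference of its own, so the extra one must be dropped. A minimal userspace model, with illustrative names only:

/* Illustrative model of the per-page reference accounting; not the
 * block-layer API. */
#include <stdio.h>

static int page_refs = 1;                       /* page starts with one ref */

static void get_page_model(void) { page_refs++; }
static void put_page_model(void) { page_refs--; }

static void map_one_page(int merged_with_prev)
{
        get_page_model();                       /* get_user_pages_fast() */
        if (merged_with_prev)
                put_page_model();               /* bi_vcnt unchanged: drop it */
}

int main(void)
{
        map_one_page(1);                        /* merged into previous segment */
        printf("page_refs=%d\n", page_refs);    /* 1 again, no leak */
        return 0;
}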


@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
BUG_ON(!id_0 && !id_1);
if (id_0) {
lookup = id_0->data;
len = id_0->len;
@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
if (id_0 && id_1) {
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
if (!kids->id[0]) {
if (!kids->id[1]) {
pr_debug("First ID matches, but second is missing\n");
goto reject;
}


@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
bool want = false;
sinfo = msg->signed_infos;
if (!sinfo)
goto inconsistent;
if (sinfo->authattrs) {
want = true;
msg->have_authattrs = true;


@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
int err;
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
buffer = kmalloc(absize, GFP_KERNEL);
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
struct scatterlist *sg = req->src;
unsigned int offset = sg->offset;
unsigned int nbytes = req->nbytes;
struct scatterlist *sg;
unsigned int offset;
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
if (nbytes &&
(sg = req->src, offset = sg->offset,
nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
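
The rewritten condition above is deliberately ordered: for a zero-length request req->src may not point at anything usable, so nbytes is tested first and the scatterlist is only dereferenced behind the short-circuiting &&. A hedged userspace demonstration of the same guard shape, with made-up types (struct sg, fits_one_page) standing in for the crypto API:

/* Hedged demo of the guard ordering; illustrative types, not the
 * crypto API. */
#include <stddef.h>
#include <stdio.h>

struct sg { unsigned int offset, length; };

static int fits_one_page(unsigned int nbytes, struct sg *src)
{
        struct sg *sg;
        unsigned int offset, room;

        /* && short-circuits, so src is never touched when nbytes == 0 */
        return nbytes &&
               (sg = src, offset = sg->offset,
                room = sg->length < 4096u - offset ? sg->length
                                                   : 4096u - offset,
                nbytes < room);
}

int main(void)
{
        struct sg s = { .offset = 0, .length = 64 };

        printf("%d\n", fits_one_page(0, NULL));  /* safe: prints 0 */
        printf("%d\n", fits_one_page(16, &s));   /* prints 1 */
        return 0;
}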


@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
static int skcipher_walk_first(struct skcipher_walk *walk)
{
walk->nbytes = 0;
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
if (unlikely(!walk->total))
return 0;
walk->buffer = NULL;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = skcipher_copy_iv(walk);
@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
walk->total = req->cryptlen;
walk->nbytes = 0;
if (unlikely(!walk->total))
return 0;
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
walk->total = req->cryptlen;
walk->iv = req->iv;
walk->oiv = req->iv;
@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int err;
walk->nbytes = 0;
if (unlikely(!walk->total))
return 0;
walk->flags &= ~SKCIPHER_WALK_PHYS;
scatterwalk_start(&walk->in, req->src);


@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx->name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
"xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
goto err_drop_spawn;
}
} else
goto err_drop_spawn;


@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* }
* }
*
* Calling this function with index %2 return %-ENOENT and with index %3
* returns the last entry. If the property does not contain any more values
* %-ENODATA is returned. The NULL entry must be single integer and
* preferably contain value %0.
* Calling this function with index %2 or index %3 returns %-ENOENT. If the
* property does not contain any more values, %-ENOENT is returned. The NULL
* entry must be a single integer and preferably contain the value %0.
*
* Return: %0 on success, negative error code on failure.
*/
@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
data = acpi_device_data_of_node(fwnode);
if (!data)
return -EINVAL;
return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret;
return ret == -EINVAL ? -ENOENT : -EINVAL;
/*
* The simplest case is when the value is a single reference. Just
@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(obj->reference.handle, &device);
if (ret)
return ret;
return ret == -ENODEV ? -EINVAL : ret;
args->adev = device;
args->nargs = 0;
@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
* The index argument is then used to determine which reference
* the caller wants (along with the arguments).
*/
if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
return -EPROTO;
if (obj->type != ACPI_TYPE_PACKAGE)
return -EINVAL;
if (index >= obj->package.count)
return -ENOENT;
element = obj->package.elements;
end = element + obj->package.count;
@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
return -ENODEV;
return -EINVAL;
nargs = 0;
element++;
@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
else if (type == ACPI_TYPE_LOCAL_REFERENCE)
break;
else
return -EPROTO;
return -EINVAL;
}
if (nargs > MAX_ACPI_REFERENCE_ARGS)
return -EPROTO;
return -EINVAL;
if (idx == index) {
args->adev = device;
@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
element++;
} else {
return -EPROTO;
return -EINVAL;
}
idx++;
}
return -ENODATA;
return -ENOENT;
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
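
Taken together, the error-code changes above settle on one convention: %-ENOENT means "no such entry" (missing property, empty slot, index out of bounds) and %-EINVAL means the property exists but is malformed. A hedged caller-side sketch, where acpi_get_ref() is a hypothetical stand-in for __acpi_node_get_property_reference():

/* Hedged sketch of caller-side handling under the new convention;
 * acpi_get_ref() is a made-up stand-in. */
#include <errno.h>
#include <stdio.h>

static int acpi_get_ref(int index)              /* fake: three entries */
{
        return index < 3 ? 0 : -ENOENT;
}

int main(void)
{
        for (int i = 0; ; i++) {
                int ret = acpi_get_ref(i);

                if (ret == -ENOENT)             /* ran past the last entry */
                        break;
                if (ret == -EINVAL) {           /* malformed property */
                        fprintf(stderr, "parse error\n");
                        return 1;
                }
                printf("got reference %d\n", i);
        }
        return 0;
}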


@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true;
}
/**
* binder_get_node_refs_for_txn() - Get required refs on node for txn
* @node: struct binder_node for which to get refs
* @procp: returns @node->proc if valid
* @error: if no @proc then returns BR_DEAD_REPLY
*
* User-space normally keeps the node alive when creating a transaction
* since it has a reference to the target. The local strong ref keeps it
* alive if the sending process dies before the target process processes
* the transaction. If the source process is malicious or has a reference
* counting bug, relying on the local strong ref can fail.
*
* Since user-space can cause the local strong ref to go away, we also take
* a tmpref on the node to ensure it survives while we are constructing
* the transaction. We also need a tmpref on the proc while we are
* constructing the transaction, so we take that here as well.
*
* Return: The target_node with refs taken, or NULL if @node->proc is NULL.
* Also sets @procp if valid. If @node->proc is NULL, indicating that the
* target proc has died, @error is set to BR_DEAD_REPLY.
*/
static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
struct binder_proc **procp,
uint32_t *error)
{
struct binder_node *target_node = NULL;
binder_node_inner_lock(node);
if (node->proc) {
target_node = node;
binder_inc_node_nilocked(node, 1, 0, NULL);
binder_inc_node_tmpref_ilocked(node);
node->proc->tmp_ref++;
*procp = node->proc;
} else
*error = BR_DEAD_REPLY;
binder_node_inner_unlock(node);
return target_node;
}
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
binder_inc_node(ref->node, 1, 0, NULL);
target_node = ref->node;
target_node = binder_get_node_refs_for_txn(
ref->node, &target_proc,
&return_error);
} else {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
}
binder_proc_unlock(proc);
if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
if (target_node)
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
if (!target_node) {
/*
* return_error is set above
*/
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
binder_inner_proc_lock(target_proc);
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_proc);
binder_node_unlock(target_node);
e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
if (target_node)
binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
@ -3090,6 +3126,8 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@ -3104,13 +3142,14 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
if (target_node)
if (target_node) {
binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
@ -3623,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc,
}
}
static int binder_has_thread_work(struct binder_thread *thread)
{
return !binder_worklist_empty(thread->proc, &thread->todo) ||
thread->looper_need_return;
}
static int binder_put_node_cmd(struct binder_proc *proc,
struct binder_thread *thread,
void __user **ptrp,
@ -4258,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp,
binder_inner_proc_unlock(thread->proc);
if (binder_has_work(thread, wait_for_proc_work))
return POLLIN;
poll_wait(filp, &thread->wait, wait);
if (binder_has_thread_work(thread))
if (binder_has_work(thread, wait_for_proc_work))
return POLLIN;
return 0;
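
The binder_poll() change restores the canonical shape of a poll method: test the condition, register with poll_wait(), then test the same condition again, since checking a different (narrower) predicate after sleeping can report no work even though binder_has_work() just said otherwise. A hedged kernel-context sketch of that shape, with hypothetical my_dev/my_has_work names:

/* Hedged sketch of the canonical poll shape; my_dev and my_has_work()
 * are illustrative, not binder APIs. */
#include <linux/fs.h>
#include <linux/poll.h>

struct my_dev {
        wait_queue_head_t waitq;
        bool have_data;
};

static bool my_has_work(struct my_dev *dev)
{
        return READ_ONCE(dev->have_data);
}

static unsigned int my_poll(struct file *filp, poll_table *wait)
{
        struct my_dev *dev = filp->private_data;

        if (my_has_work(dev))                   /* fast path: no sleep needed */
                return POLLIN;
        poll_wait(filp, &dev->waitq, wait);     /* register for wakeups */
        if (my_has_work(dev))                   /* re-check the same predicate */
                return POLLIN;
        return 0;
}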


@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
if (!vma && need_mm)
mm = get_task_mm(alloc->tsk);
if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
if (vma && mm != alloc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
alloc->pid);
vma = NULL;
}
}
if (!vma && need_mm) {
@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
prev->data, next->data);
prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE,
NULL);
@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
mmgrab(alloc->vma_vm_mm);
return 0;
@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
if (alloc->vma_vm_mm)
mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
@ -926,9 +923,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
vma = alloc->vma;
if (vma) {
mm = get_task_mm(alloc->tsk);
if (!mm)
goto err_get_task_mm_failed;
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
@ -963,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
err_down_write_mmap_sem_failed:
mmput_async(mm);
err_get_task_mm_failed:
err_mmget:
err_page_already_freed:
mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
@ -1002,7 +999,6 @@ struct shrinker binder_shrinker = {
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
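
The binder_alloc changes above lean on the two separate reference counts an mm_struct carries: mm_count (mmgrab()/mmdrop()) keeps the structure itself alive, while mm_users (mmget_not_zero()/mmput()) keeps the whole address space alive. The allocator now grabs mm_count for as long as it holds vma_vm_mm and bumps mm_users only around actual page-table work. A simplified, runnable userspace model of the scheme (the struct and functions are illustrative, and task exit is reduced to dropping the last user):

/* Illustrative model of mm_users/mm_count; not the kernel API. */
#include <stdbool.h>
#include <stdio.h>

struct mm { int users; int count; };

static void grab(struct mm *mm) { mm->count++; }        /* mmgrab() */
static void drop(struct mm *mm) { mm->count--; }        /* mmdrop() */
static void put(struct mm *mm)  { mm->users--; }        /* mmput(), simplified */

static bool get_not_zero(struct mm *mm)                 /* mmget_not_zero() */
{
        if (mm->users == 0)
                return false;   /* address space already torn down */
        mm->users++;
        return true;
}

int main(void)
{
        struct mm mm = { .users = 1, .count = 1 };

        grab(&mm);                      /* binder_alloc_mmap_handler() */
        put(&mm);                       /* owning task exits */
        if (get_not_zero(&mm)) {        /* binder_update_page_range() */
                /* would take mmap_sem and update the vma here */
                put(&mm);
        } else {
                printf("mm gone; skip page-table work\n");
        }
        drop(&mm);                      /* binder_alloc_deferred_release() */
        return 0;
}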


@ -100,7 +100,6 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;


@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
ssize_t n;
cpumask_var_t mask;
struct node *node_dev = to_node(dev);
const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
return cpumap_print_to_pagebuf(list, buf, mask);
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return 0;
cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
n = cpumap_print_to_pagebuf(list, buf, mask);
free_cpumask_var(mask);
return n;
}
static inline ssize_t node_read_cpumask(struct device *dev,


@ -21,6 +21,7 @@
#include <linux/phy.h>
struct property_set {
struct device *dev;
struct fwnode_handle fwnode;
const struct property_entry *properties;
};
@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* Caller is responsible to call fwnode_handle_put() on the returned
* args->fwnode pointer.
*
* Returns: %0 on success
* %-ENOENT when the index is out of bounds, the index has an empty
* reference or the property was not found
* %-EINVAL on parse error
*/
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
struct property_set *pset;
fwnode = dev_fwnode(dev);
if (!fwnode)
@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
if (is_pset_node(fwnode)) {
pset = to_pset_node(fwnode);
if (pset) {
set_primary_fwnode(dev, NULL);
pset_free_set(to_pset_node(fwnode));
} else {
fwnode = fwnode->secondary;
if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
pset = to_pset_node(fwnode->secondary);
if (pset && dev == pset->dev)
set_secondary_fwnode(dev, NULL);
pset_free_set(to_pset_node(fwnode));
}
}
if (pset && dev == pset->dev)
pset_free_set(pset);
}
EXPORT_SYMBOL_GPL(device_remove_properties);
@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
p->dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(device_add_properties);


@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
struct nbd_config *config = nbd->config;
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
nbd_size_update(nbd);
}
static void nbd_complete_rq(struct request *req)
@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(recv_workqueue, &args->work);
}
nbd_size_update(nbd);
return error;
}


@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
return NULL;
*dma_handle = dma_map_single(dev, buf, s->size, dir);
if (dma_mapping_error(dev, *dma_handle)) {
kfree(buf);
kmem_cache_free(s, buf);
buf = NULL;
}
return buf;


@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & DDR_BASE_CS_LOW_MASK;
w->size = (size | ~DDR_SIZE_MASK) + 1;
w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
}
}
mvebu_mbus_dram_info.num_cs = cs;
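
The (u64) cast matters for 4 GiB windows: size | ~DDR_SIZE_MASK is a 32-bit value, and for a full-size window it is 0xffffffff, so adding 1 wraps to 0. A runnable demonstration (0xff000000 stands in for DDR_SIZE_MASK here, purely for illustration):

/* Runnable demo of the overflow the (u64) cast avoids. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t size = 0xff000000;     /* register value for a 4 GiB window */
        uint32_t mask = 0xff000000;     /* stand-in for DDR_SIZE_MASK */

        uint32_t bad  = (size | ~mask) + 1;           /* wraps to 0 */
        uint64_t good = (uint64_t)(size | ~mask) + 1; /* 0x100000000 = 4 GiB */

        printf("bad=0x%" PRIx32 " good=0x%" PRIx64 "\n", bad, good);
        return 0;
}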


@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
if (clockevent_state_shutdown(&cs5535_clockevent))
if (clockevent_state_detached(&cs5535_clockevent) ||
clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */


@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;
static struct dentry *dbgfs_root;
#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
char *desc;
};
static struct dentry *dbgfs_root;
static void artpec6_crypto_init_debugfs(void)
{
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);


@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct scatterlist sg[1], *tsg;
int err = 0, len = 0, reg, ncp;
int err = 0, len = 0, reg, ncp = 0;
unsigned int i;
const u32 *buffer = (const u32 *)rctx->buffer;
u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
reg |= HASH_CR_DMAA;
stm32_hash_write(hdev, HASH_CR, reg);
for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
stm32_hash_write(hdev, HASH_DIN, buffer[i]);
stm32_hash_set_nblw(hdev, ncp);
if (ncp) {
memset(buffer + ncp, 0,
DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
writesl(hdev->io_base + HASH_DIN, buffer,
DIV_ROUND_UP(ncp, sizeof(u32)));
}
stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg);


@ -383,7 +383,7 @@ err_put_fd:
return err;
}
static void sync_fill_fence_info(struct dma_fence *fence,
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) :
ktime_set(0, 0);
return info->status;
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* sync_fence_info and return the actual number of fences on
* info->num_fences.
*/
if (!info.num_fences)
if (!info.num_fences) {
info.status = dma_fence_is_signaled(sync_file->fence);
goto no_fences;
} else {
info.status = 1;
}
if (info.num_fences < num_fences)
return -EINVAL;
@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info)
return -ENOMEM;
for (i = 0; i < num_fences; i++)
sync_fill_fence_info(fences[i], &fence_info[i]);
for (i = 0; i < num_fences; i++) {
int status = sync_fill_fence_info(fences[i], &fence_info[i]);
info.status = info.status <= 0 ? info.status : status;
}
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
sync_file_get_name(sync_file, info.name, sizeof(info.name));
info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
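
The resulting aggregation rule: the file-level status starts at 1 (all fences signaled) and keeps the first nonpositive per-fence status it encounters, whether 0 (pending) or a negative error. A runnable model of that fold:

/* Runnable model of the status aggregation above: per-fence status is
 * 1 (signaled), 0 (pending) or negative (error). */
#include <stdio.h>

int main(void)
{
        int fences[] = { 1, 1, 0, -5, 1 };
        int status = 1;                 /* signaled until shown otherwise */

        for (int i = 0; i < 5; i++)
                status = status <= 0 ? status : fences[i];

        printf("file status = %d\n", status); /* 0: first nonpositive wins */
        return 0;
}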


@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc;
unsigned long flags;
spin_lock_bh(&mdev->lock);
spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node);
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list);
@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new;
dma_cookie_t cookie;
unsigned long flags;
new = tx_to_desc(tx);
spin_lock_bh(&mdev->lock);
spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list);
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, flags);
return cookie;
}
@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc;
size_t copy;
u32 desc_cnt;
unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
spin_lock_bh(&mdev->lock);
spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, irqflags);
do {
/* Allocate and populate the descriptor */
@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i;
struct scatterlist *sg;
u32 stride;
unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
spin_lock_bh(&mdev->lock);
spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl);
@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan)
{
struct msgdma_device *mdev = to_mdev(chan);
unsigned long flags;
spin_lock_bh(&mdev->lock);
spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev);
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
struct msgdma_device *mdev = to_mdev(dchan);
unsigned long flags;
spin_lock_bh(&mdev->lock);
spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev);
spin_unlock_bh(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq);
}
@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
unsigned long flags;
spin_lock(&mdev->lock);
spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
* bits. So we need to just drop these values.
*/
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
}
spin_unlock(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
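
All of the msgdma hunks above make the same substitution. The _bh lock variants only disable softirqs, so they are not safe if the lock can also be taken from hard-irq context or with interrupts already disabled; spin_lock_irqsave() saves the current interrupt state, disables interrupts, and restores the prior state on unlock, making it safe from any context. A hedged kernel-context sketch of the pattern (my_lock and my_work are illustrative):

/* Hedged sketch of the locking pattern the hunks switch to. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void my_work(void)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);     /* saves + disables irqs */
        /* ... touch shared state ... */
        spin_unlock_irqrestore(&my_lock, flags); /* restores prior state */
}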


@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
unsigned int width, pset_len;
unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
/* Align the array size (acnt block) with the transfer properties */
switch (__ffs((src | dest | len))) {
case 0:
array_size = SZ_32K - 1;
break;
case 1:
array_size = SZ_32K - 2;
break;
default:
array_size = SZ_32K - 4;
break;
}
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is a multiple of 32767 one slot can be
* used to complete the transfer.
*/
width = SZ_32K - 1;
width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len))
@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
dest += pset_len;
src += pset_len;
pset_len = width = len % (SZ_32K - 1);
pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
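
The switch above exploits a bit trick: __ffs(src | dest | len) is the index of the lowest bit set in any of the three values, which is exactly the largest power-of-two alignment that source address, destination address and length all share; array_size is then the largest value under 32 KiB that is a multiple of that alignment. A runnable demonstration, with __builtin_ctzl() standing in for the kernel's __ffs():

/* Runnable demo of the alignment trick; __builtin_ctzl() stands in for
 * __ffs() (both require a nonzero argument). */
#include <stdio.h>

static unsigned int pick_array_size(unsigned long src,
                                    unsigned long dest,
                                    unsigned long len)
{
        switch (__builtin_ctzl(src | dest | len)) {
        case 0:  return 32768 - 1;      /* byte aligned: 32767 */
        case 1:  return 32768 - 2;      /* 16-bit aligned: 32766 */
        default: return 32768 - 4;      /* 32-bit or better: 32764 */
        }
}

int main(void)
{
        /* 4-byte aligned addresses, odd length: common alignment is 1 byte */
        printf("%u\n", pick_array_size(0x1000, 0x2000, 4097)); /* 32767 */
        /* everything 4-byte aligned */
        printf("%u\n", pick_array_size(0x1000, 0x2000, 4096)); /* 32764 */
        return 0;
}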


@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_lock(&xbar->mutex);
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
xbar->dma_requests);
mutex_unlock(&xbar->mutex);
if (map->xbar_out == xbar->dma_requests) {
mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
mutex_unlock(&xbar->mutex);
map->xbar_in = (u16)dma_spec->args[0];
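
The point of the xbar fix is that find_first_zero_bit() and set_bit() must happen inside one critical section; dropping the mutex in between lets two threads find the same free request line. A hedged userspace sketch of the corrected allocate path, with illustrative names:

/* Hedged userspace sketch of the fixed allocate path: find and mark the
 * bit inside one critical section. */
#include <pthread.h>
#include <stdio.h>

#define NBITS 8
static unsigned char inuse[NBITS];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_request(void)
{
        int i;

        pthread_mutex_lock(&lock);
        for (i = 0; i < NBITS && inuse[i]; i++) /* find_first_zero_bit() */
                ;
        if (i == NBITS) {                       /* all requests in use */
                pthread_mutex_unlock(&lock);
                return -1;
        }
        inuse[i] = 1;                           /* set_bit(): same section */
        pthread_mutex_unlock(&lock);
        return i;
}

int main(void)
{
        printf("%d %d\n", alloc_request(), alloc_request()); /* 0 1 */
        return 0;
}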

Some files were not shown because too many files have changed in this diff.