Merge tag 'mvebu-fixes-4.12-1' of git://git.infradead.org/linux-mvebu into fixes

mvebu fixes for 4.12

Fix the interrupt description of the crypto node in the device tree of
the Armada 7K/8K SoCs.
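
For reference, the change boils down to using a single valid trigger type per interrupt specifier. A minimal sketch of the corrected crypto node, abridged from the DTS hunks further down (only the first few interrupt lines of the master CP110 block are shown; the remaining interrupts and properties are elided here):

    cpm_crypto: crypto@800000 {
            compatible = "inside-secure,safexcel-eip197";
            reg = <0x800000 0x200000>;
            /* one valid trigger type per specifier, instead of
             * OR-ing IRQ_TYPE_EDGE_RISING with IRQ_TYPE_LEVEL_HIGH */
            interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
            /* further interrupts and properties elided */
    };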

* tag 'mvebu-fixes-4.12-1' of git://git.infradead.org/linux-mvebu: (316 commits)
  arm64: marvell: dts: fix interrupts in 7k/8k crypto nodes
  + Linux 4.12-rc2

Signed-off-by: Olof Johansson <olof@lixom.net>
Olof Johansson 2017-06-01 16:56:06 -07:00
commit 1ba2eaaacd
349 changed files with 3191 additions and 1432 deletions


@ -1,31 +0,0 @@
Hi6220 SoC ION
===================================================================
Required properties:
- compatible : "hisilicon,hi6220-ion"
- list of the ION heaps
- heap name : maybe heap_sys_user@0
- heap id : id should be unique in the system.
- heap base : base ddr address of the heap,0 means that
it is dynamic.
- heap size : memory size and 0 means it is dynamic.
- heap type : the heap type of the heap, please also
see the define in ion.h(drivers/staging/android/uapi/ion.h)
-------------------------------------------------------------------
Example:
hi6220-ion {
compatible = "hisilicon,hi6220-ion";
heap_sys_user@0 {
heap-name = "sys_user";
heap-id = <0x0>;
heap-base = <0x0>;
heap-size = <0x0>;
heap-type = "ion_system";
};
heap_sys_contig@0 {
heap-name = "sys_contig";
heap-id = <0x1>;
heap-base = <0x0>;
heap-size = <0x0>;
heap-type = "ion_system_contig";
};
};


@ -114,8 +114,7 @@ the details during registration. The class offers the following API for
registering/unregistering cables and their plugs:
.. kernel-doc:: drivers/usb/typec/typec.c
:functions: typec_register_cable typec_unregister_cable typec_register_plug
typec_unregister_plug
:functions: typec_register_cable typec_unregister_cable typec_register_plug typec_unregister_plug
The class will provide a handle to struct typec_cable and struct typec_plug if
the registration is successful, or NULL if it isn't.
@ -137,8 +136,7 @@ during connection of a partner or cable, the port driver must use the following
APIs to report it to the class:
.. kernel-doc:: drivers/usb/typec/typec.c
:functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role
typec_set_pwr_opmode
:functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role typec_set_pwr_opmode
Alternate Modes
~~~~~~~~~~~~~~~


@ -117,7 +117,7 @@ nowayout: Watchdog cannot be stopped once started
-------------------------------------------------
iTCO_wdt:
heartbeat: Watchdog heartbeat in seconds.
(2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30)
(5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30)
nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter)
-------------------------------------------------


@ -846,7 +846,6 @@ M: Laura Abbott <labbott@redhat.com>
M: Sumit Semwal <sumit.semwal@linaro.org>
L: devel@driverdev.osuosl.org
S: Supported
F: Documentation/devicetree/bindings/staging/ion/
F: drivers/staging/android/ion
F: drivers/staging/android/uapi/ion.h
F: drivers/staging/android/uapi/ion_test.h
@ -3114,6 +3113,14 @@ F: drivers/net/ieee802154/cc2520.c
F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER
M: Gilad Ben-Yossef <gilad@benyossef.com>
L: linux-crypto@vger.kernel.org
L: driverdev-devel@linuxdriverproject.org
S: Supported
F: drivers/staging/ccree/
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
CEC FRAMEWORK
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@ -5693,7 +5700,7 @@ M: Alex Elder <elder@kernel.org>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
F: drivers/staging/greybus/
L: greybus-dev@lists.linaro.org
L: greybus-dev@lists.linaro.org (moderated for non-subscribers)
GREYBUS AUDIO PROTOCOLS DRIVERS
M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
@ -9551,10 +9558,6 @@ F: drivers/net/wireless/intersil/orinoco/
OSD LIBRARY and FILESYSTEM
M: Boaz Harrosh <ooo@electrozaur.com>
M: Benny Halevy <bhalevy@primarydata.com>
L: osd-dev@open-osd.org
W: http://open-osd.org
T: git git://git.open-osd.org/open-osd.git
S: Maintained
F: drivers/scsi/osd/
F: include/scsi/osd_*


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -1172,7 +1172,7 @@ headers_check_all: headers_install_all
PHONY += headers_check
headers_check: headers_install
$(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/ $(hdr-dst) HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1
# ---------------------------------------------------------------------------
# Kernel selftest


@ -1201,8 +1201,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
err = 0;
err |= put_user(status, ustatus);
err = put_user(status, ustatus);
if (ret < 0)
return err ? err : ret;
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);


@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);


@ -32,6 +32,7 @@
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"
@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
return 1;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
/*
@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
* must always support PMCCNTR (the cycle counter): we just RAZ/WI for
* all PM registers, which doesn't crash the guest kernel at least.
*/
static bool pm_fake(struct kvm_vcpu *vcpu,
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake
#define access_pmcr trap_raz_wi
#define access_pmcntenset trap_raz_wi
#define access_pmcntenclr trap_raz_wi
#define access_pmovsr trap_raz_wi
#define access_pmselr trap_raz_wi
#define access_pmceid0 trap_raz_wi
#define access_pmceid1 trap_raz_wi
#define access_pmccntr trap_raz_wi
#define access_pmxevtyper trap_raz_wi
#define access_pmxevcntr trap_raz_wi
#define access_pmuserenr trap_raz_wi
#define access_pmintenset trap_raz_wi
#define access_pmintenclr trap_raz_wi
/* Architected CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
return 1;
}
/**
* kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
struct coproc_params params;
@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
params.CRm = 0;
return params;
}
/**
* kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct coproc_params params = decode_64bit_hsr(vcpu);
return emulate_cp15(vcpu, &params);
}
/**
* kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct coproc_params params = decode_64bit_hsr(vcpu);
/* raz_wi cp14 */
trap_raz_wi(vcpu, &params, NULL);
/* handled */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
const struct coproc_reg *table, size_t num)
{
@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
table[i].reset(vcpu, &table[i]);
}
/**
* kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
struct coproc_params params;
@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
params.Rt2 = 0;
return params;
}
/**
* kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct coproc_params params = decode_32bit_hsr(vcpu);
return emulate_cp15(vcpu, &params);
}
/**
* kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct coproc_params params = decode_32bit_hsr(vcpu);
/* raz_wi cp14 */
trap_raz_wi(vcpu, &params, NULL);
/* handled */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
/******************************************************************************
* Userspace API
*****************************************************************************/


@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = {
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
[HSR_EC_CP14_MR] = kvm_handle_cp14_access,
[HSR_EC_CP14_MR] = kvm_handle_cp14_32,
[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
[HSR_EC_CP14_64] = kvm_handle_cp14_access,
[HSR_EC_CP14_64] = kvm_handle_cp14_64,
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
[HSR_EC_HVC] = handle_hvc,


@ -2,6 +2,8 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
ccflags-y += -fno-stack-protector
KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o


@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
write_sysreg(HSTR_T(15), HSTR);
write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
val = read_sysreg(HDCR);
write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
write_sysreg(val, HDCR);
}
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)


@ -1,5 +1,5 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H
#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ARM_KVM_H
#include <linux/tracepoint.h>
@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
#endif /* _TRACE_KVM_H */
#endif /* _TRACE_ARM_KVM_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/arm/kvm
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace


@ -231,8 +231,7 @@
cpm_crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
| IRQ_TYPE_LEVEL_HIGH)>,
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,


@ -221,8 +221,7 @@
cps_crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
| IRQ_TYPE_LEVEL_HIGH)>,
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,


@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
" mov %" #w "[oldval], %" #w "[old]\n" \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(unsigned long *)ptr) \


@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
bool this_cpu_has_cap(unsigned int cap);
@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
}
/* System capability check for constant caps */
static inline bool cpus_have_const_cap(int num)
static inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
return test_bit(num, cpu_hwcaps);
}
static inline bool cpus_have_const_cap(int num)
{
if (static_branch_likely(&arm64_const_caps_ready))
return __cpus_have_const_cap(num);
else
return cpus_have_cap(num);
}
static inline void cpus_set_cap(unsigned int num)
{
if (num >= ARM64_NCAPS) {
@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
num, ARM64_NCAPS);
} else {
__set_bit(num, cpu_hwcaps);
static_branch_enable(&cpu_hwcap_keys[num]);
}
}


@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cpufeature.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long vector_ptr)
{
/*
* Call initialization code, and switch to the full blown
* HYP code.
* Call initialization code, and switch to the full blown HYP code.
* If the cpucaps haven't been finalized yet, something has gone very
* wrong, and hyp will crash and burn when it uses any
* cpus_have_const_cap() wrapper.
*/
BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}


@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
*/
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
if (caps->enable && cpus_have_cap(caps->capability))
for (; caps->matches; caps++) {
unsigned int num = caps->capability;
if (!cpus_have_cap(num))
continue;
/* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]);
if (caps->enable) {
/*
* Use stop_machine() as it schedules the work allowing
* us to modify PSTATE, instead of on_each_cpu() which
@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* we return.
*/
stop_machine(caps->enable, NULL, cpu_online_mask);
}
}
}
/*
@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
enable_cpu_capabilities(arm64_features);
}
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);
static void __init mark_const_caps_ready(void)
{
static_branch_enable(&arm64_const_caps_ready);
}
/*
* Check if the current CPU has a given feature capability.
* Should be called from non-preemptible context.
@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
/* Set the CPU feature capabilies */
setup_feature_capabilities();
enable_errata_workarounds();
mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())


@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
if (attr->exclude_idle)
return -EPERM;
if (is_kernel_in_hyp_mode() &&
attr->exclude_kernel != attr->exclude_hv)
return -EINVAL;
/*
* If we're running in hyp mode, then we *are* the hypervisor.
* Therefore we ignore exclude_hv in this configuration, since
* there's no hypervisor to sample anyway. This is consistent
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
if (!attr->exclude_kernel)
config_base |= ARMV8_PMU_INCLUDE_EL2;
} else {
if (attr->exclude_kernel)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (!attr->exclude_hv)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (!attr->exclude_hv)
config_base |= ARMV8_PMU_INCLUDE_EL2;
/*
* Install the filter into config_base as this is used to


@ -2,6 +2,8 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
ccflags-y += -fno-stack-protector
KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o


@ -253,8 +253,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
*/
off = offsetof(struct bpf_array, ptrs);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR64(tmp, r2, tmp), ctx);
emit(A64_LDR64(prg, tmp, r3), ctx);
emit(A64_ADD(1, tmp, r2, tmp), ctx);
emit(A64_LSL(1, prg, r3, 3), ctx);
emit(A64_LDR64(prg, tmp, prg), ctx);
emit(A64_CBZ(1, prg, jmp_offset), ctx);
/* goto *(prog->bpf_func + prologue_size); */


@ -14,6 +14,10 @@
#include <asm-generic/module.h>
#ifdef CC_USING_MPROFILE_KERNEL
#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
#endif
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.


@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#ifdef CONFIG_PPC_BOOK3S_64
/*
* On hash the vmalloc and other regions alias to the kernel region when passed
* through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
* return true for some vmalloc addresses, which is incorrect. So explicitly
* check that the address is in the kernel region.
*/
#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
pfn_valid(virt_to_pfn(kaddr)))
#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't


@ -416,7 +416,7 @@ power9_dd1_recover_paca:
* which needs to be restored from the stack.
*/
li r3, 1
stb r0,PACA_NAPSTATELOST(r13)
stb r3,PACA_NAPSTATELOST(r13)
blr
/*


@ -305,16 +305,17 @@ int kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
if (ret > 0) {
restore_previous_kprobe(kcb);
preempt_enable_no_resched();
return 1;
}
}
prepare_singlestep(p, regs);
return 1;
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {


@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
/*
* If we are in a transaction and FP is off then we can't have
* used FP inside that transaction. Hence the checkpointed
* state is the same as the live state. We need to copy the
* live state to the checkpointed state so that when the
* transaction is restored, the checkpointed state is correct
* and the aborted transaction sees the correct state. We use
* ckpt_regs.msr here as that's what tm_reclaim will use to
* determine if it's going to write the checkpointed state or
* not. So either this will write the checkpointed registers,
* or reclaim will. Similarly for VMX.
*/
if ((thr->ckpt_regs.msr & MSR_FP) == 0)
memcpy(&thr->ckfp_state, &thr->fp_state,
sizeof(struct thread_fp_state));
if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
memcpy(&thr->ckvr_state, &thr->vr_state,
sizeof(struct thread_vr_state));
giveup_all(container_of(thr, struct task_struct, thread));
tm_reclaim(thr, thr->ckpt_regs.msr, cause);


@ -67,7 +67,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.


@ -46,7 +46,7 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
book3s_64_vio_hv.o
kvm-pr-y := \
@ -90,11 +90,11 @@ kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o
kvm-book3s_64-module-objs := \
$(common-objs-y) \
book3s.o \
book3s_64_vio.o \
book3s_rtas.o \
$(kvm-book3s_64-objs-y)


@ -301,6 +301,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
/* For radix, we might be in virtual mode, so punt */
if (kvm_is_radix(vcpu->kvm))
return H_TOO_HARD;
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@ -381,6 +385,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
/* For radix, we might be in virtual mode, so punt */
if (kvm_is_radix(vcpu->kvm))
return H_TOO_HARD;
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@ -491,6 +499,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long i, ret;
struct kvmppc_spapr_tce_iommu_table *stit;
/* For radix, we might be in virtual mode, so punt */
if (kvm_is_radix(vcpu->kvm))
return H_TOO_HARD;
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@ -527,6 +539,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return H_SUCCESS;
}
/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba)
{


@ -207,7 +207,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
int r;
/* Only need to do the expensive mfmsr() on radix */
if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
r = powernv_get_random_long(&vcpu->arch.gpr[4]);
else
r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
if (r)
return H_SUCCESS;
return H_HARDWARE;


@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
pteg_addr = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
ret = H_FUNCTION;
if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
goto done;
hpte = pteg;
ret = H_PTEG_FULL;
@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
ret = H_FUNCTION;
if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
goto done;
kvmppc_set_gpr(vcpu, 4, pte_index | i);
ret = H_SUCCESS;
@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
ret = H_FUNCTION;
if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
goto done;
copy_to_user((void __user *)pteg, &v, sizeof(v));
ret = H_FUNCTION;
if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
goto done;
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
ret = H_FUNCTION;
break;
}
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
copy_to_user((void __user *)pteg, &v, sizeof(v));
if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
ret = H_FUNCTION;
break;
}
rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
ret = H_FUNCTION;
if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
pte[0] = (__force u64)cpu_to_be64(pte[0]);
pte[1] = (__force u64)cpu_to_be64(pte[1]);
copy_to_user((void __user *)pteg, pte, sizeof(pte));
ret = H_FUNCTION;
if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
goto done;
ret = H_SUCCESS;
done:
@ -244,20 +262,6 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
long rc;
@ -280,6 +284,21 @@ static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
@ -311,6 +330,23 @@ static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
#else /* CONFIG_SPAPR_TCE_IOMMU */
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
return EMULATE_FAIL;
}
static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
return EMULATE_FAIL;
}
static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
return EMULATE_FAIL;
}
#endif /* CONFIG_SPAPR_TCE_IOMMU */
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
long rc = kvmppc_xics_hcall(vcpu, cmd);


@ -1749,7 +1749,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_enable_cap(kvm, &cap);
break;
}
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_CREATE_SPAPR_TCE_64: {
struct kvm_create_spapr_tce_64 create_tce_64;
@ -1780,6 +1780,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
goto out;
}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
struct kvm_ppc_smmu_info info;
struct kvm *kvm = filp->private_data;


@ -16,6 +16,7 @@
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
@ -391,7 +392,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
if (!pmd_none(*pmd))
if (!pmd_none(*pmd) && !pmd_huge(*pmd))
/* pmd exists */
walk_pte(st, pmd, addr);
else
@ -407,7 +408,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
if (!pud_none(*pud))
if (!pud_none(*pud) && !pud_huge(*pud))
/* pud exists */
walk_pmd(st, pud, addr);
else
@ -427,7 +428,7 @@ static void walk_pagetables(struct pg_state *st)
*/
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = KERN_VIRT_START + i * PGDIR_SIZE;
if (!pgd_none(*pgd))
if (!pgd_none(*pgd) && !pgd_huge(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
else


@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/refcount.h>
#include <uapi/asm/debug.h>
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
@ -31,7 +32,7 @@ struct debug_view;
typedef struct debug_info {
struct debug_info* next;
struct debug_info* prev;
atomic_t ref_count;
refcount_t ref_count;
spinlock_t lock;
int level;
int nr_areas;


@ -40,6 +40,8 @@ static inline int insn_length(unsigned char code)
return ((((int) code + 64) >> 7) + 1) << 1;
}
struct pt_regs;
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);


@ -27,12 +27,21 @@
* 2005-Dec Used as a template for s390 by Mike Grundy
* <grundym@us.ibm.com>
*/
#include <linux/types.h>
#include <asm-generic/kprobes.h>
#define BREAKPOINT_INSTRUCTION 0x0002
#define FIXUP_PSW_NORMAL 0x08
#define FIXUP_BRANCH_NOT_TAKEN 0x04
#define FIXUP_RETURN_REGISTER 0x02
#define FIXUP_NOT_REQUIRED 0x01
int probe_is_prohibited_opcode(u16 *insn);
int probe_get_fixup_type(u16 *insn);
int probe_is_insn_relative_long(u16 *insn);
#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/sched/task_stack.h>
@ -56,11 +65,6 @@ typedef u16 kprobe_opcode_t;
#define KPROBE_SWAP_INST 0x10
#define FIXUP_PSW_NORMAL 0x08
#define FIXUP_BRANCH_NOT_TAKEN 0x04
#define FIXUP_RETURN_REGISTER 0x02
#define FIXUP_NOT_REQUIRED 0x01
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
@ -90,10 +94,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
int probe_is_prohibited_opcode(u16 *insn);
int probe_get_fixup_type(u16 *insn);
int probe_is_insn_relative_long(u16 *insn);
#define flush_insn_slot(p) do { } while (0)
#endif /* CONFIG_KPROBES */


@ -146,7 +146,7 @@ extern int topology_max_mnest;
* Returns the maximum nesting level supported by the cpu topology code.
* The current maximum level is 4 which is the drawer level.
*/
static inline int topology_mnest_limit(void)
static inline unsigned char topology_mnest_limit(void)
{
return min(topology_max_mnest, 4);
}


@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
sizeof(struct dentry*));
atomic_set(&(rc->ref_count), 0);
refcount_set(&(rc->ref_count), 0);
return rc;
@ -361,7 +361,7 @@ debug_info_create(const char *name, int pages_per_area, int nr_areas,
debug_area_last = rc;
rc->next = NULL;
debug_info_get(rc);
refcount_set(&rc->ref_count, 1);
out:
return rc;
}
@ -416,7 +416,7 @@ static void
debug_info_get(debug_info_t * db_info)
{
if (db_info)
atomic_inc(&db_info->ref_count);
refcount_inc(&db_info->ref_count);
}
/*
@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info)
if (!db_info)
return;
if (atomic_dec_and_test(&db_info->ref_count)) {
if (refcount_dec_and_test(&db_info->ref_count)) {
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!db_info->views[i])
continue;


@ -312,6 +312,7 @@ ENTRY(system_call)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@ -623,6 +624,7 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@ -1174,15 +1176,23 @@ cleanup_critical:
br %r14
.Lcleanup_sysc_restore:
# check if stpt has been executed
clg %r9,BASED(.Lcleanup_sysc_restore_insn)
jh 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
je 1f
lg %r9,24(%r11) # get saved pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
0: lmg %r8,%r9,__LC_RETURN_PSW
1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
@ -1190,15 +1200,20 @@ cleanup_critical:
br %r14
.Lcleanup_io_restore:
# check if stpt has been executed
clg %r9,BASED(.Lcleanup_io_restore_insn)
je 0f
jh 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
je 1f
lg %r9,24(%r11) # get saved r11 pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
0: lmg %r8,%r9,__LC_RETURN_PSW
1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
.Lcleanup_idle:


@ -173,6 +173,8 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
#ifdef CONFIG_MODULES
static int __init ftrace_plt_init(void)
{
unsigned int *ip;
@ -191,6 +193,8 @@ static int __init ftrace_plt_init(void)
}
device_initcall(ftrace_plt_init);
#endif /* CONFIG_MODULES */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses


@ -31,8 +31,14 @@ SECTIONS
{
. = 0x00000000;
.text : {
_text = .; /* Text and read-only data */
/* Text and read-only data */
HEAD_TEXT
/*
* E.g. perf doesn't like symbols starting at address zero,
* therefore skip the initial PSW and channel program located
* at address zero and let _text start at 0x200.
*/
_text = 0x200;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT


@ -4,6 +4,7 @@
* Copyright IBM Corp. 2014
*/
#include <linux/errno.h>
#include <asm/kprobes.h>
#include <asm/dis.h>


@ -337,8 +337,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long size)
return 0;
done = 0;
do {
offset = (size_t)src & ~PAGE_MASK;
len = min(size - done, PAGE_SIZE - offset);
offset = (size_t)src & (L1_CACHE_BYTES - 1);
len = min(size - done, L1_CACHE_BYTES - offset);
if (copy_from_user(dst, src, len))
return -EFAULT;
len_str = strnlen(dst, len);


@ -24,9 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~HPAGE_MASK)
if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}


@ -91,9 +91,9 @@ extern unsigned long pfn_base;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page;
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic


@ -16,7 +16,7 @@ extern char reboot_command[];
*/
extern unsigned char boot_cpu_id;
extern unsigned long empty_zero_page;
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern int serial_console;
static inline int con_is_present(void)


@ -130,18 +130,17 @@ unsigned long prepare_ftrace_return(unsigned long parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return parent + 8UL;
trace.func = self_addr;
trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace))
return parent + 8UL;
if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
frame_pointer, NULL) == -EBUSY)
return parent + 8UL;
trace.func = self_addr;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
return parent + 8UL;
}
return return_hooker;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


@ -290,7 +290,7 @@ void __init mem_init(void)
/* Saves us work later. */
memset((void *)&empty_zero_page, 0, PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;


@ -43,7 +43,7 @@
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#define KVM_HALT_POLL_NS_DEFAULT 400000
#define KVM_HALT_POLL_NS_DEFAULT 200000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS


@ -319,10 +319,10 @@ do { \
#define __get_user_asm_u64(x, ptr, retval, errret) \
({ \
__typeof__(ptr) __ptr = (ptr); \
asm volatile(ASM_STAC "\n" \
asm volatile("\n" \
"1: movl %2,%%eax\n" \
"2: movl %3,%%edx\n" \
"3: " ASM_CLAC "\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: mov %4,%0\n" \
" xorl %%eax,%%eax\n" \
@ -331,7 +331,7 @@ do { \
".previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (retval), "=A"(x) \
: "=r" (retval), "=&A"(x) \
: "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
})
@ -703,14 +703,15 @@ extern struct movsl_mask {
#define unsafe_put_user(x, ptr, err_label) \
do { \
int __pu_err; \
__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
__typeof__(*(ptr)) __pu_val = (x); \
__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
if (unlikely(__pu_err)) goto err_label; \
} while (0)
#define unsafe_get_user(x, ptr, err_label) \
do { \
int __gu_err; \
unsigned long __gu_val; \
__inttype(*(ptr)) __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (unlikely(__gu_err)) goto err_label; \


@ -90,6 +90,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
* Boot time FPU feature detection code:
*/
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
{


@ -4173,7 +4173,7 @@ static int check_dr_write(struct x86_emulate_ctxt *ctxt)
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
u64 efer;
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);


@ -283,11 +283,13 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
pt_element_t pte;
pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
u64 pt_access, pte_access;
unsigned index, accessed_dirty, pte_pkey;
unsigned nested_access;
gpa_t pte_gpa;
bool have_ad;
int offset;
u64 walk_nx_mask = 0;
const int write_fault = access & PFERR_WRITE_MASK;
const int user_fault = access & PFERR_USER_MASK;
const int fetch_fault = access & PFERR_FETCH_MASK;
@ -302,6 +304,7 @@ retry_walk:
have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
#if PTTYPE == 64
walk_nx_mask = 1ULL << PT64_NX_SHIFT;
if (walker->level == PT32E_ROOT_LEVEL) {
pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
@ -313,8 +316,6 @@ retry_walk:
walker->max_level = walker->level;
ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0;
/*
* FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
* by the MOV to CR instruction are treated as reads and do not cause the
@ -322,14 +323,14 @@ retry_walk:
*/
nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
pt_access = pte_access = ACC_ALL;
pte_access = ~0;
++walker->level;
do {
gfn_t real_gfn;
unsigned long host_addr;
pt_access &= pte_access;
pt_access = pte_access;
--walker->level;
index = PT_INDEX(addr, walker->level);
@ -371,6 +372,12 @@ retry_walk:
trace_kvm_mmu_paging_element(pte, walker->level);
/*
* Inverting the NX it lets us AND it like other
* permission bits.
*/
pte_access = pt_access & (pte ^ walk_nx_mask);
if (unlikely(!FNAME(is_present_gpte)(pte)))
goto error;
@ -379,14 +386,16 @@ retry_walk:
goto error;
}
accessed_dirty &= pte;
pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
walker->ptes[walker->level - 1] = pte;
} while (!is_last_gpte(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
goto error;
@ -403,7 +412,7 @@ retry_walk:
walker->gfn = real_gpa >> PAGE_SHIFT;
if (!write_fault)
FNAME(protect_clean_gpte)(mmu, &pte_access, pte);
FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
else
/*
* On a write fault, fold the dirty bit into accessed_dirty.
@ -421,10 +430,8 @@ retry_walk:
goto retry_walk;
}
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
__func__, (u64)pte, pte_access, pt_access);
__func__, (u64)pte, walker->pte_access, walker->pt_access);
return 1;
error:
@ -452,7 +459,7 @@ error:
*/
if (!(errcode & PFERR_RSVD_MASK)) {
vcpu->arch.exit_qualification &= 0x187;
vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
}
#endif
walker->fault.address = addr;


@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
((u64)1 << edx.split.bit_width_fixed) - 1;
}
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;


@ -1272,7 +1272,8 @@ static void init_vmcb(struct vcpu_svm *svm)
}
static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index)
static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
unsigned int index)
{
u64 *avic_physical_id_table;
struct kvm_arch *vm_data = &vcpu->kvm->arch;


@ -6504,7 +6504,7 @@ static __init int hardware_setup(void)
enable_ept_ad_bits = 0;
}
if (!cpu_has_vmx_ept_ad_bits())
if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
enable_ept_ad_bits = 0;
if (!cpu_has_vmx_unrestricted_guest())
@ -11213,7 +11213,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
if (!nested_cpu_has_pml(vmcs12))
return 0;
if (vmcs12->guest_pml_index > PML_ENTITY_NUM) {
if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
vmx->nested.pml_full = true;
return 1;
}


@ -1763,6 +1763,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_arch *ka = &kvm->arch;
struct pvclock_vcpu_time_info hv_clock;
u64 ret;
spin_lock(&ka->pvclock_gtod_sync_lock);
if (!ka->use_master_clock) {
@ -1774,10 +1775,17 @@ u64 get_kvmclock_ns(struct kvm *kvm)
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* both __this_cpu_read() and rdtsc() should be on the same cpu */
get_cpu();
kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
&hv_clock.tsc_shift,
&hv_clock.tsc_to_system_mul);
return __pvclock_read_cycles(&hv_clock, rdtsc());
ret = __pvclock_read_cycles(&hv_clock, rdtsc());
put_cpu();
return ret;
}
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
@ -3288,11 +3296,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
}
}
#define XSAVE_MXCSR_OFFSET 24
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
/*
@ -3300,11 +3311,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
* CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
* with old userspace.
*/
if (xstate_bv & ~kvm_supported_xcr0())
if (xstate_bv & ~kvm_supported_xcr0() ||
mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
if (xstate_bv & ~XFEATURE_MASK_FPSSE)
if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state.fxsave,
guest_xsave->region, sizeof(struct fxregs_state));
@ -4818,16 +4831,20 @@ emul_write:
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
/* TODO: String I/O for in kernel device */
int r;
int r = 0, i;
if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
for (i = 0; i < vcpu->arch.pio.count; i++) {
if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
if (r)
break;
pd += vcpu->arch.pio.size;
}
return r;
}
@ -4865,6 +4882,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
if (vcpu->arch.pio.count)
goto data_avail;
memset(vcpu->arch.pio_data, 0, size * count);
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
@ -5048,6 +5067,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
if (base3)
*base3 = 0;
return false;
}


@ -142,9 +142,7 @@ static void __init xen_banner(void)
struct xen_extraversion extra;
HYPERVISOR_xen_version(XENVER_extraversion, &extra);
pr_info("Booting paravirtualized kernel %son %s\n",
xen_feature(XENFEAT_auto_translated_physmap) ?
"with PVH extensions " : "", pv_info.name);
pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
printk(KERN_INFO "Xen version: %d.%d%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
@ -957,15 +955,10 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
void xen_setup_shared_info(void)
{
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
set_fixmap(FIX_PARAVIRT_BOOTMAP,
xen_start_info->shared_info);
set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
HYPERVISOR_shared_info =
(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
} else
HYPERVISOR_shared_info =
(struct shared_info *)__va(xen_start_info->shared_info);
HYPERVISOR_shared_info =
(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
#ifndef CONFIG_SMP
/* In UP this is as good a place as any to set up shared info */


@ -42,7 +42,7 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
void xen_flush_tlb_all(void)
static void xen_flush_tlb_all(void)
{
struct mmuext_op *op;
struct multicall_space mcs;


@ -355,10 +355,8 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
if (!xen_feature(XENFEAT_auto_translated_physmap))
mfn = __pfn_to_mfn(pfn);
else
mfn = pfn;
mfn = __pfn_to_mfn(pfn);
/*
* If there's no mfn for the pfn, then just create an
* empty non-present pte. Unfortunately this loses
@ -647,9 +645,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
limit--;
BUG_ON(limit >= FIXADDR_TOP);
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
/*
* 64-bit has a great big hole in the middle of the address
* space, which contains the Xen mappings. On 32-bit these
@ -1289,9 +1284,6 @@ static void __init xen_pagetable_cleanhighmap(void)
static void __init xen_pagetable_p2m_setup(void)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
xen_vmalloc_p2m_tree();
#ifdef CONFIG_X86_64
@ -1314,8 +1306,7 @@ static void __init xen_pagetable_init(void)
xen_build_mfn_list_list();
/* Remap memory freed due to conflicts with E820 map */
if (!xen_feature(XENFEAT_auto_translated_physmap))
xen_remap_memory();
xen_remap_memory();
xen_setup_shared_info();
}
@ -1925,21 +1916,20 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Zap identity mapping */
init_level4_pgt[0] = __pgd(0);
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Pre-constructed entries are in pfn, so convert to mfn */
/* L4[272] -> level3_ident_pgt
* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_level4_pgt);
/* Pre-constructed entries are in pfn, so convert to mfn */
/* L4[272] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_level4_pgt);
/* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt
* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
/* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt */
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
/* L3_k[511][506] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* L3_k[511][506] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
@ -1962,34 +1952,30 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
if (i && i < pgd_index(__START_KERNEL_map))
init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
/* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa_symbol(init_level4_pgt)));
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa_symbol(init_level4_pgt)));
/* Unpin Xen-provided one */
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
/* Unpin Xen-provided one */
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
/*
* At this stage there can be no user pgd, and no page
* structure to attach it to, so make sure we just set kernel
* pgd.
*/
xen_mc_batch();
__xen_write_cr3(true, __pa(init_level4_pgt));
xen_mc_issue(PARAVIRT_LAZY_CPU);
} else
native_write_cr3(__pa(init_level4_pgt));
/*
* At this stage there can be no user pgd, and no page structure to
* attach it to, so make sure we just set kernel pgd.
*/
xen_mc_batch();
__xen_write_cr3(true, __pa(init_level4_pgt));
xen_mc_issue(PARAVIRT_LAZY_CPU);
/* We can't that easily rip out L3 and L2, as the Xen pagetables are
* set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
@ -2403,9 +2389,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
static void __init xen_post_allocator_init(void)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@ -2511,9 +2494,6 @@ void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);
@ -2650,9 +2630,6 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
* this function are redundant and can be ignored.
*/
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
@ -2689,9 +2666,6 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
int success;
unsigned long vstart;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
if (unlikely(order > MAX_CONTIG_ORDER))
return;


@ -315,24 +315,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
}
/* still holds resource->req_lock */
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
struct drbd_device *device = req->device;
D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
if (!put)
return;
if (!atomic_sub_and_test(put, &req->completion_ref))
return 0;
return;
drbd_req_complete(req, m);
/* local completion may still come in later,
* we need to keep the req object around. */
if (req->rq_state & RQ_LOCAL_ABORTED)
return;
if (req->rq_state & RQ_POSTPONED) {
/* don't destroy the req object just yet,
* but queue it for retry */
drbd_restart_request(req);
return 0;
return;
}
return 1;
kref_put(&req->kref, drbd_req_destroy);
}
static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
@ -519,12 +527,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if (req->i.waiting)
wake_up(&device->misc_wait);
if (c_put) {
if (drbd_req_put_completion_ref(req, m, c_put))
kref_put(&req->kref, drbd_req_destroy);
} else {
kref_put(&req->kref, drbd_req_destroy);
}
drbd_req_put_completion_ref(req, m, c_put);
kref_put(&req->kref, drbd_req_destroy);
}
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
@ -1366,8 +1370,7 @@ nodata:
}
out:
if (drbd_req_put_completion_ref(req, &m, 1))
kref_put(&req->kref, drbd_req_destroy);
drbd_req_put_completion_ref(req, &m, 1);
spin_unlock_irq(&resource->req_lock);
/* Even though above is a kref_put(), this is safe.


@ -504,11 +504,13 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
dev_set_drvdata(&dev->dev, NULL);
if (be->blkif)
if (be->blkif) {
xen_blkif_disconnect(be->blkif);
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
}
kfree(be->mode);
kfree(be);
return 0;


@ -859,7 +859,11 @@ static int __init lp_setup (char *str)
} else if (!strcmp(str, "auto")) {
parport_nr[0] = LP_PARPORT_AUTO;
} else if (!strcmp(str, "none")) {
parport_nr[parport_ptr++] = LP_PARPORT_NONE;
if (parport_ptr < LP_NO)
parport_nr[parport_ptr++] = LP_PARPORT_NONE;
else
printk(KERN_INFO "lp: too many ports, %s ignored.\n",
str);
} else if (!strcmp(str, "reset")) {
reset = 1;
}


@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = {
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size < offset)
return -EINVAL;
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;
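
The added guard works because unsigned arithmetic wraps modulo 2^N: if offset + size overflows phys_addr_t, the sum is numerically smaller than offset. A minimal standalone sketch of the same wrap-around check, using plain C and hypothetical values rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when start + len wraps past the end of the address space. */
static int range_wraps(uint64_t start, uint64_t len)
{
	return start + len < start;	/* unsigned overflow check */
}

int main(void)
{
	/* 0xFFFFFFFFFFFFF000 + 0x2000 wraps, so such a mapping must be rejected. */
	printf("%d\n", range_wraps(0xFFFFFFFFFFFFF000ULL, 0x2000));	/* prints 1 */
	printf("%d\n", range_wraps(0x100000ULL, 0x1000));		/* prints 0 */
	return 0;
}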


@ -44,6 +44,7 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
#ifdef CONFIG_BLOCK
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
pgoff_t *pgoff)
{
@ -112,6 +113,7 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
return 0;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif
/**
* struct dax_device - anchor object for dax services


@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
int dimm, size0, size1;
int dimm, size0, size1, cs0, cs1;
edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
cs0 = dimm * 2;
if (dcsb[dimm*2] & DCSB_CS_ENABLE)
size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
if (csrow_enabled(cs0, ctrl, pvt))
size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
size1 = 0;
if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
cs1 = dimm * 2 + 1;
if (csrow_enabled(cs1, ctrl, pvt))
size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0,
dimm * 2 + 1, size1);
cs0, size0,
cs1, size1);
}
}
@ -2756,26 +2758,22 @@ skip:
* encompasses
*
*/
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
u32 cs_mode, nr_pages;
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
if (!pvt->umc)
csrow_nr >>= 1;
/*
* The math on this doesn't look right on the surface because x/2*4 can
* be simplified to x*2 but this expression makes use of the fact that
* it is integral math where 1/2=0. This intermediate value becomes the
* number of bits to shift the DBAM register to extract the proper CSROW
* field.
*/
cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
cs_mode = DBAM_DIMM(csrow_nr, dbam);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
<< (20 - PAGE_SHIFT);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
csrow_nr, dct, cs_mode);
csrow_nr_orig, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;


@ -155,19 +155,14 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
* efi_pstore_sysfs_entry_iter
*
* @record: pstore record to pass to callback
* @pos: entry to begin iterating from
*
* You MUST call efivar_enter_iter_begin() before this function, and
* efivar_entry_iter_end() afterwards.
*
* It is possible to begin iteration from an arbitrary entry within
* the list by passing @pos. @pos is updated on return to point to
* the next entry of the last one passed to efi_pstore_read_func().
* To begin iterating from the beginning of the list @pos must be %NULL.
*/
static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
struct efivar_entry **pos)
static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
{
struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
struct efivar_entry *entry, *n;
struct list_head *head = &efivar_sysfs_list;
int size = 0;
@ -218,7 +213,6 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
*/
static ssize_t efi_pstore_read(struct pstore_record *record)
{
struct efivar_entry *entry = (struct efivar_entry *)record->psi->data;
ssize_t size;
record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@ -229,7 +223,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
size = -EINTR;
goto out;
}
size = efi_pstore_sysfs_entry_iter(record, &entry);
size = efi_pstore_sysfs_entry_iter(record);
efivar_entry_iter_end();
out:


@ -116,9 +116,13 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
return VPD_OK;
info = kzalloc(sizeof(*info), GFP_KERNEL);
info->key = kzalloc(key_len + 1, GFP_KERNEL);
if (!info->key)
if (!info)
return -ENOMEM;
info->key = kzalloc(key_len + 1, GFP_KERNEL);
if (!info->key) {
ret = -ENOMEM;
goto free_info;
}
memcpy(info->key, key, key_len);
@ -135,12 +139,17 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
list_add_tail(&info->list, &sec->attribs);
ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
if (ret) {
kfree(info->key);
return ret;
}
if (ret)
goto free_info_key;
return 0;
free_info_key:
kfree(info->key);
free_info:
kfree(info);
return ret;
}
static void vpd_section_attrib_destroy(struct vpd_section *sec)
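
The reordered error path above follows the usual unwind-with-goto shape: check each allocation as soon as it is made, and on failure free everything acquired so far in reverse order. A compact sketch of that shape with hypothetical names (make_entry, struct entry), not code from the driver:

#include <stdlib.h>
#include <string.h>

struct entry {
	char *key;
	char *value;
};

/* Allocate an entry plus two strings; unwind in reverse order on any failure. */
static struct entry *make_entry(const char *key, const char *value)
{
	struct entry *e = malloc(sizeof(*e));
	if (!e)
		return NULL;

	e->key = strdup(key);
	if (!e->key)
		goto free_entry;

	e->value = strdup(value);
	if (!e->value)
		goto free_key;

	return e;

free_key:
	free(e->key);
free_entry:
	free(e);
	return NULL;
}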


@ -10,6 +10,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
u32 src_w, src_h;
struct drm_rect clip = { 0 };
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
/* we can't do any scaling of the plane source */
if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
/* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
if (src_h >= HDLCD_MAX_YRES) {
DRM_DEBUG_KMS("Invalid source width: %d\n", src_h);
return -EINVAL;
}
return 0;
if (!state->fb || !state->crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
if (!crtc_state) {
DRM_DEBUG_KMS("Invalid crtc state\n");
return -EINVAL;
}
clip.x2 = crtc_state->adjusted_mode.hdisplay;
clip.y2 = crtc_state->adjusted_mode.vdisplay;
return drm_plane_helper_check_state(state, &clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
struct drm_framebuffer *fb = plane->state->fb;
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
u32 src_w, src_h, dest_w, dest_h;
u32 src_x, src_y, dest_h;
dma_addr_t scanout_start;
if (!fb)
return;
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
dest_h = plane->state->crtc_h;
src_x = plane->state->src.x1 >> 16;
src_y = plane->state->src.y1 >> 16;
dest_h = drm_rect_height(&plane->state->dst);
gem = drm_fb_cma_get_gem_obj(fb, 0);
scanout_start = gem->paddr + fb->offsets[0] +
plane->state->crtc_y * fb->pitches[0] +
plane->state->crtc_x *
fb->format->cpp[0];
src_y * fb->pitches[0] +
src_x * fb->format->cpp[0];
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
formats, ARRAY_SIZE(formats),
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
devm_kfree(drm->dev, plane);
return ERR_PTR(ret);
}
@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm)
&hdlcd_crtc_funcs, NULL);
if (ret) {
hdlcd_plane_destroy(primary);
devm_kfree(drm->dev, primary);
return ret;
}


@ -152,8 +152,7 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
const struct device_node *np)
static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_rgb_output *output;
@ -161,6 +160,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
&panel, &bridge);
if (ret)
return ret;
output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
if (!output)
return -EINVAL;
@ -177,10 +181,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
output->encoder.possible_crtcs = 0x1;
ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
if (ret)
return ret;
if (panel) {
output->connector.dpms = DRM_MODE_DPMS_OFF;
output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
@ -220,22 +220,14 @@ err_encoder_cleanup:
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
struct device_node *remote;
int ret = -ENODEV;
int endpoint = 0;
int endpoint, ret = 0;
while (true) {
/* Loop thru possible multiple connections to the output */
remote = of_graph_get_remote_node(dev->dev->of_node, 0,
endpoint++);
if (!remote)
break;
for (endpoint = 0; !ret; endpoint++)
ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
ret = atmel_hlcdc_attach_endpoint(dev, remote);
of_node_put(remote);
if (ret)
return ret;
}
/* At least one device was successfully attached.*/
if (ret == -ENODEV && endpoint)
return 0;
return ret;
}


@ -44,6 +44,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->fence = NULL;
ww_acquire_init(&submit->ticket, &reservation_ww_class);
}
@ -294,7 +295,8 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
}
ww_acquire_fini(&submit->ticket);
dma_fence_put(submit->fence);
if (submit->fence)
dma_fence_put(submit->fence);
kfree(submit);
}


@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}

View File

@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
} else
v = mmio->value;
if (mmio->in_context)
continue;
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);


@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
struct vgpu_sched_data *vgpu_data;
ktime_t cur_time;
/* no target to schedule */
if (!scheduler->next_vgpu)
/* no need to schedule if next_vgpu is the same as current_vgpu,
* let the scheduler choose next_vgpu again by setting it to NULL.
*/
if (scheduler->next_vgpu == scheduler->current_vgpu) {
scheduler->next_vgpu = NULL;
return;
}
/*
* after the flag is set, workload dispatch thread will


@ -195,9 +195,12 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
if (ret)
return ret;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
vma->size);
if (ret)
return ret;
}
vma->pages = vma->obj->mm.pages;
@ -2306,7 +2309,8 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
if (appgtt->base.allocate_va_range) {
if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
appgtt->base.allocate_va_range) {
ret = appgtt->base.allocate_va_range(&appgtt->base,
vma->node.start,
vma->node.size);


@ -3051,10 +3051,14 @@ enum skl_disp_power_wells {
#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
/* Note, below two are guess */
#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
/*
* Note that at least on ELK the value below is reported for both the
* 333 and 400 MHz BIOS FSB settings, but given that the gmch datasheet
* lists only 200/266/333 MHz FSB as supported, let's decode it as 333 MHz.
*/
#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
#define CLKCFG_FSB_MASK (7 << 0)
#define CLKCFG_MEM_533 (1 << 4)
#define CLKCFG_MEM_667 (2 << 4)


@ -1798,13 +1798,11 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
case CLKCFG_FSB_800:
return 200000;
case CLKCFG_FSB_1067:
case CLKCFG_FSB_1067_ALT:
return 266667;
case CLKCFG_FSB_1333:
case CLKCFG_FSB_1333_ALT:
return 333333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
return 400000;
default:
return 133333;
}


@ -410,11 +410,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS Not active */
/* Wait for ULPS active */
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE,
GLK_ULPS_NOT_ACTIVE, 20))
DRM_ERROR("ULPS is still active\n");
MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));


@ -63,6 +63,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "i915_drv.h"
#include <linux/delay.h>
@ -121,6 +122,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(rsc);
pm_runtime_forbid(&platdev->dev);
pm_runtime_set_active(&platdev->dev);
pm_runtime_enable(&platdev->dev);
return platdev;
err:


@ -360,6 +360,8 @@ nouveau_display_hpd_work(struct work_struct *work)
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
/* enable polling for external displays */
drm_kms_helper_poll_enable(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@ -413,10 +415,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
/* enable polling for external displays */
if (!dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);


@ -502,6 +502,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
} else {
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);
}
return 0;
@ -774,9 +777,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev, true);
if (!drm_dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);


@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
WARN_ON(1);
return;
goto unlock;
}
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
& 0x00100000),
msecs_to_jiffies(2000)) == 0)
nvkm_error(subdev, "runlist %d update timeout\n", runl);
unlock:
mutex_unlock(&subdev->mutex);
}


@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
ret = nvkm_firmware_get(subdev->device, f, &sig);
if (ret)
goto free_data;
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
if (!img->sig) {
ret = -ENOMEM;
@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
img->ucode_data = ls_ucode_img_build(bl, code, data,
&img->ucode_desc);
if (IS_ERR(img->ucode_data)) {
kfree(img->sig);
ret = PTR_ERR(img->ucode_data);
goto free_data;
goto free_sig;
}
img->ucode_size = img->ucode_desc.image_size;


@ -1,6 +1,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
select IOMMU_IOVA if IOMMU_SUPPORT
help
Driver for the NVIDIA Tegra host1x hardware.


@ -604,6 +604,13 @@ static int coretemp_cpu_online(unsigned int cpu)
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct platform_data *pdata;
/*
* Don't execute this on resume as the offline callback did
* not get executed on suspend.
*/
if (cpuhp_tasks_frozen)
return 0;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
* sensors. We check this bit only, all the early CPUs
@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu)
struct temp_data *tdata;
int indx, target;
/*
* Don't execute this on suspend as the device remove locks
* up the machine.
*/
if (cpuhp_tasks_frozen)
return 0;
/* If the physical CPU device does not exist, just return */
if (!pdev)
return 0;
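
The two early returns rely on cpuhp_tasks_frozen, which the CPU hotplug core sets while tasks are frozen for suspend/resume, so the callbacks only do real work for genuine hotplug events. A skeletal, purely illustrative online/offline pair using the same test; the "demo" names and state string below are made up, not taken from the driver:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

static int demo_cpu_online(unsigned int cpu)
{
	/* Resume replays "online" for every CPU; skip the expensive setup then. */
	if (cpuhp_tasks_frozen)
		return 0;

	pr_info("demo: cpu %u hotplugged in\n", cpu);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	/* Likewise, ignore the offline calls generated while suspending. */
	if (cpuhp_tasks_frozen)
		return 0;

	pr_info("demo: cpu %u hotplugged out\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* Dynamic state: a positive return is the allocated state, not an error. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	return ret < 0 ? ret : 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");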


@ -96,6 +96,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
const struct acpi_device_id *id;
u32 ss_ht, fp_ht, hs_ht, fs_ht;
struct acpi_device *adev;
const char *uid;
@ -107,23 +108,24 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
* Try to get SDA hold time and *CNT values from an ACPI method for
* selected speed modes.
*/
dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
switch (dev->clk_freq) {
case 100000:
dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt,
&dev->sda_hold_time);
dev->sda_hold_time = ss_ht;
break;
case 1000000:
dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt,
&dev->sda_hold_time);
dev->sda_hold_time = fp_ht;
break;
case 3400000:
dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt,
&dev->sda_hold_time);
dev->sda_hold_time = hs_ht;
break;
case 400000:
default:
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
&dev->sda_hold_time);
dev->sda_hold_time = fs_ht;
break;
}


@ -819,7 +819,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
rc = -EINVAL;
goto out;
}
drv_data->irq = irq_of_parse_and_map(np, 0);
drv_data->rstc = devm_reset_control_get_optional(dev, NULL);
if (IS_ERR(drv_data->rstc)) {
@ -902,10 +901,11 @@ mv64xxx_i2c_probe(struct platform_device *pd)
if (!IS_ERR(drv_data->clk))
clk_prepare_enable(drv_data->clk);
drv_data->irq = platform_get_irq(pd, 0);
if (pdata) {
drv_data->freq_m = pdata->freq_m;
drv_data->freq_n = pdata->freq_n;
drv_data->irq = platform_get_irq(pd, 0);
drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
drv_data->offload_enabled = false;
memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets));
@ -915,7 +915,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
goto exit_clk;
}
if (drv_data->irq < 0) {
rc = -ENXIO;
rc = drv_data->irq;
goto exit_reset;
}


@ -416,6 +416,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
adapter->dev.of_node = pdev->dev.of_node;
ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev));
i2c_set_adapdata(adapter, ctx);
rc = i2c_add_adapter(adapter);
if (rc) {


@ -395,18 +395,20 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
if (force_nr) {
priv->adap.nr = force_nr;
ret = i2c_add_numbered_adapter(&priv->adap);
dev_err(&parent->dev,
"failed to add mux-adapter %u as bus %u (error=%d)\n",
chan_id, force_nr, ret);
if (ret < 0) {
dev_err(&parent->dev,
"failed to add mux-adapter %u as bus %u (error=%d)\n",
chan_id, force_nr, ret);
goto err_free_priv;
}
} else {
ret = i2c_add_adapter(&priv->adap);
dev_err(&parent->dev,
"failed to add mux-adapter %u (error=%d)\n",
chan_id, ret);
}
if (ret < 0) {
kfree(priv);
return ret;
if (ret < 0) {
dev_err(&parent->dev,
"failed to add mux-adapter %u (error=%d)\n",
chan_id, ret);
goto err_free_priv;
}
}
WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj,
@ -422,6 +424,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
muxc->adapter[muxc->num_adapters++] = &priv->adap;
return 0;
err_free_priv:
kfree(priv);
return ret;
}
EXPORT_SYMBOL_GPL(i2c_mux_add_adapter);


@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mux->data.reg_size = resource_size(res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mux->data.reg))
return PTR_ERR(mux->data.reg);
if (IS_ERR(mux->data.reg)) {
ret = PTR_ERR(mux->data.reg);
goto err_put_parent;
}
}
if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
mux->data.reg_size != 1) {
dev_err(&pdev->dev, "Invalid register size\n");
return -EINVAL;
ret = -EINVAL;
goto err_put_parent;
}
muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
i2c_mux_reg_select, NULL);
if (!muxc)
return -ENOMEM;
if (!muxc) {
ret = -ENOMEM;
goto err_put_parent;
}
muxc->priv = mux;
platform_set_drvdata(pdev, muxc);
@ -223,7 +228,7 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
if (ret)
goto add_adapter_failed;
goto err_del_mux_adapters;
}
dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
@ -231,8 +236,10 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
return 0;
add_adapter_failed:
err_del_mux_adapters:
i2c_mux_del_adapters(muxc);
err_put_parent:
i2c_put_adapter(parent);
return ret;
}


@ -396,13 +396,13 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
dma_addr_t iova, size_t size)
{
struct iova_domain *iovad = &cookie->iovad;
unsigned long shift = iova_shift(iovad);
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
else
free_iova_fast(iovad, iova >> shift, size >> shift);
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
@ -617,11 +617,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
size_t iova_off = 0;
dma_addr_t iova;
size = iova_align(iovad, size + iova_off);
if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
iova_off = iova_offset(&cookie->iovad, phys);
size = iova_align(&cookie->iovad, size + iova_off);
}
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
return DMA_ERROR_CODE;


@ -2055,11 +2055,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
}
pgd = domain->pgd;


@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>


@ -106,10 +106,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
u32 *mask, u32 *addr)
{
unsigned int ofst;
hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
ofst = hwirq / 32 * 4;
unsigned int ofst = (hwirq / 32) * 4;
*mask = 1 << (hwirq % 32);
*addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
@ -337,9 +334,15 @@ static int mbigen_device_probe(struct platform_device *pdev)
mgn_chip->pdev = pdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mgn_chip->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mgn_chip->base))
return PTR_ERR(mgn_chip->base);
if (!res)
return -EINVAL;
mgn_chip->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!mgn_chip->base) {
dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
return -ENOMEM;
}
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
err = mbigen_of_create_domain(pdev, mgn_chip);


@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
* Buffers are freed after this timeout
*/
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
return true;
}
static unsigned get_retain_buffers(struct dm_bufio_client *c)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
return retain_bytes / c->block_size;
unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
struct dm_buffer *b, *tmp;
unsigned long freed = 0;
unsigned long count = nr_to_scan;
unsigned retain_target = get_retain_buffers(c);
unsigned long retain_target = get_retain_buffers(c);
for (l = 0; l < LIST_SIZE; l++) {
list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
struct dm_buffer *b, *tmp;
unsigned retain_target = get_retain_buffers(c);
unsigned count;
unsigned long retain_target = get_retain_buffers(c);
unsigned long count;
LIST_HEAD(write_list);
dm_bufio_lock(c);
@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
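
With dm_bufio_retain_bytes now an unsigned long, the retained-buffer count is derived purely by shifting: a buffer covers 1 << (sectors_per_block_bits + SECTOR_SHIFT) bytes, so dividing the byte budget by the block size is the same as shifting it right by that amount. A tiny worked example, with values chosen for illustration only:

#include <stdio.h>

#define SECTOR_SHIFT 9				/* 512-byte sectors */

int main(void)
{
	unsigned long retain_bytes = 256UL * 1024 * 1024;	/* 256 MiB, example only */
	unsigned sectors_per_block_bits = 3;			/* 8 sectors => 4 KiB blocks */

	unsigned long block_size = 1UL << (sectors_per_block_bits + SECTOR_SHIFT);
	unsigned long retain_buffers = retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);

	/* 256 MiB / 4 KiB = 65536 buffers kept around. */
	printf("block_size=%lu retain_buffers=%lu\n", block_size, retain_buffers);
	return 0;
}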


@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
{
struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
if (!b) {
DMERR("couldn't create background_tracker");
return NULL;
}
b->max_work = max_work;
atomic_set(&b->pending_promotes, 0);
atomic_set(&b->pending_writebacks, 0);


@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
unsigned nr_clean;
if (idle) {
/*
* We'd like to clean everything.
@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
return q_size(&mq->dirty) == 0u;
}
nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
percent_to_target(mq, CLEAN_TARGET);
/*
* If we're busy we don't worry about cleaning at all.
*/
return true;
}
static bool free_target_met(struct smq_policy *mq, bool idle)
static bool free_target_met(struct smq_policy *mq)
{
unsigned nr_free;
if (!idle)
return true;
nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
percent_to_target(mq, FREE_TARGET);
@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
return;
e = q_peek(&mq->clean, mq->clean.nr_levels, true);
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
if (!e) {
if (!clean_target_met(mq, false))
if (!clean_target_met(mq, true))
queue_writeback(mq);
return;
}
@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
* We always claim to be 'idle' to ensure some demotions happen
* with continuous loads.
*/
if (!free_target_met(mq, true))
if (!free_target_met(mq))
queue_demotion(mq);
return;
}
@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
spin_lock_irqsave(&mq->lock, flags);
r = btracker_issue(mq->bg_work, result);
if (r == -ENODATA) {
/* find some writeback work to do */
if (mq->migrations_allowed && !free_target_met(mq, idle))
queue_demotion(mq);
else if (!clean_target_met(mq, idle))
if (!clean_target_met(mq, idle)) {
queue_writeback(mq);
r = btracker_issue(mq->bg_work, result);
r = btracker_issue(mq->bg_work, result);
}
}
spin_unlock_irqrestore(&mq->lock, flags);
@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
clear_pending(mq, e);
if (success) {
e->oblock = work->oblock;
e->level = NR_CACHE_LEVELS - 1;
push(mq, e);
// h, q, a
} else {


@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
static void __iot_io_end(struct io_tracker *iot, sector_t len)
{
if (!len)
return;
iot->in_flight -= len;
if (!iot->in_flight)
iot->idle_time = jiffies;
@ -474,7 +477,7 @@ struct cache {
spinlock_t invalidation_lock;
struct list_head invalidation_requests;
struct io_tracker origin_tracker;
struct io_tracker tracker;
struct work_struct commit_ws;
struct batcher committer;
@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
static bool accountable_bio(struct cache *cache, struct bio *bio)
{
return ((bio->bi_bdev == cache->origin_dev->bdev) &&
bio_op(bio) != REQ_OP_DISCARD);
return bio_op(bio) != REQ_OP_DISCARD;
}
static void accounted_begin(struct cache *cache, struct bio *bio)
@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
if (accountable_bio(cache, bio)) {
pb->len = bio_sectors(bio);
iot_io_begin(&cache->origin_tracker, pb->len);
iot_io_begin(&cache->tracker, pb->len);
}
}
@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
iot_io_end(&cache->origin_tracker, pb->len);
iot_io_end(&cache->tracker, pb->len);
}
static void accounted_request(struct cache *cache, struct bio *bio)
@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
enum busy {
IDLE,
MODERATE,
BUSY
};
static enum busy spare_migration_bandwidth(struct cache *cache)
{
bool idle = iot_idle_for(&cache->origin_tracker, HZ);
bool idle = iot_idle_for(&cache->tracker, HZ);
sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
cache->sectors_per_block;
if (current_volume <= cache->migration_threshold)
return idle ? IDLE : MODERATE;
if (idle && current_volume <= cache->migration_threshold)
return IDLE;
else
return idle ? MODERATE : BUSY;
return BUSY;
}
static void inc_hit_counter(struct cache *cache, struct bio *bio)
@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
for (;;) {
b = spare_migration_bandwidth(cache);
if (b == BUSY)
break;
r = policy_get_background_work(cache->policy, b == IDLE, &op);
if (r == -ENODATA)
@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
batcher_init(&cache->committer, commit_op, cache,
issue_op, cache, cache->wq);
iot_init(&cache->origin_tracker);
iot_init(&cache->tracker);
init_rwsem(&cache->background_work_lock);
prevent_background_work(cache);
@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
cancel_delayed_work(&cache->waker);
flush_workqueue(cache->wq);
WARN_ON(cache->origin_tracker.in_flight);
WARN_ON(cache->tracker.in_flight);
/*
* If it's a flush suspend there won't be any deferred bios, so this


@ -447,7 +447,7 @@ failed:
* it has been invoked.
*/
#define dm_report_EIO(m) \
({ \
do { \
struct mapped_device *md = dm_table_get_md((m)->ti->table); \
\
pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@ -455,8 +455,7 @@ failed:
test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
dm_noflush_suspending((m)->ti)); \
-EIO; \
})
} while (0)
/*
* Map cloned requests (request-based multipath)
@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
if (!pgpath) {
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
return DM_MAPIO_DELAY_REQUEUE;
return dm_report_EIO(m); /* Failed */
dm_report_EIO(m); /* Failed */
return DM_MAPIO_KILL;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
if (pg_init_all_paths(m))
@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
if (!pgpath) {
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
return DM_MAPIO_REQUEUE;
return dm_report_EIO(m);
dm_report_EIO(m);
return -EIO;
}
mpio->pgpath = pgpath;
@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
if (error == -EIO)
error = dm_report_EIO(m);
dm_report_EIO(m);
/* complete with the original error */
r = DM_ENDIO_DONE;
}
@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
fail_path(mpio->pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
return dm_report_EIO(m);
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
dm_report_EIO(m);
return -EIO;
}
/* Queue for the daemon to resubmit */
dm_bio_restore(get_bio_details_from_bio(clone), clone);
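
dm_report_EIO() changes from a GCC statement expression, whose last line (-EIO) made the macro usable as a value, into a plain do { } while (0) block that only logs; the callers now return DM_MAPIO_KILL or -EIO explicitly. The contrast between the two macro styles, in a standalone sketch with hypothetical names (compile with gcc, as statement expressions are a GNU extension):

#include <stdio.h>

/* Statement-expression style: the macro itself evaluates to a value. */
#define report_and_value(msg) \
	({ \
		fprintf(stderr, "error: %s\n", (msg)); \
		-5; \
	})

/* do/while style: the macro is only a statement; the caller supplies the value. */
#define report_only(msg) \
	do { \
		fprintf(stderr, "error: %s\n", (msg)); \
	} while (0)

int main(void)
{
	int a = report_and_value("old style");	/* a == -5 */

	report_only("new style");
	int b = -5;				/* value chosen explicitly by the caller */

	return a == b ? 0 : 1;
}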


@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
case DM_MAPIO_KILL:
/* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, -EIO);
break;
default:
DMWARN("unimplemented target map return value: %d", r);
BUG();
