Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-08-31 10:25:26 +02:00
commit 02b643b643
124 changed files with 858 additions and 1090 deletions

.get_maintainer.ignore (new file)
@@ -0,0 +1 @@
Christoph Hellwig <hch@lst.de>

@@ -5849,6 +5849,7 @@ S: Odd Fixes
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
M: "J. Bruce Fields" <bfields@fieldses.org>
M: Jeff Layton <jlayton@poochiereds.net>
L: linux-nfs@vger.kernel.org
W: http://nfs.sourceforge.net/
S: Supported

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*

@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
bootpImage uImage: zImage
zImage: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@

@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
/* the mmap semaphore is taken only if not in an atomic context */
atomic = in_atomic();
atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);

@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,

@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, false, addr);
inject_abt64(vcpu, false, addr);
else
inject_abt64(vcpu, false, addr);
}
/**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, true, addr);
inject_abt64(vcpu, true, addr);
else
inject_abt64(vcpu, true, addr);
}
/**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_undef32(vcpu);
inject_undef64(vcpu);
else
inject_undef64(vcpu);
}
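
A note on the three hunks above: they all fix the same missing-else bug. Without the else, the 64-bit injection ran unconditionally, so a 32-bit guest received both the 32-bit and the 64-bit exception. A minimal user-space C sketch of the control-flow pattern (names are illustrative, not the kernel's):

    #include <stdio.h>

    static void inject32(void) { puts("inject 32-bit abort"); }
    static void inject64(void) { puts("inject 64-bit abort"); }

    static void inject_buggy(int guest_is_32bit)
    {
        if (guest_is_32bit)
            inject32();
        inject64();          /* BUG: also runs for 32-bit guests */
    }

    static void inject_fixed(int guest_is_32bit)
    {
        if (guest_is_32bit)
            inject32();
        else
            inject64();      /* exactly one path is taken */
    }

    int main(void)
    {
        inject_buggy(1);     /* prints both lines */
        inject_fixed(1);     /* prints only the 32-bit line */
        return 0;
    }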

@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
__BUILD_clear_\clear
__build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp

@@ -191,6 +191,9 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
pci_device_add(dev, bus);
/* Setup MSI caps & disable MSI/MSI-X interrupts */
pci_msi_setup_pci_dev(dev);
return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

@@ -79,12 +79,12 @@ do { \
#else /* CONFIG_X86_32 */
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
"r12", "r13", "r14", "r15", "flags"
"r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
@@ -100,11 +100,7 @@ do { \
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
/*
* There is no need to save or restore flags, because flags are always
* clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
* has no effect.
*/
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \

@@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void)
{
u64 msr;
if (cpu_has_apic)
if (!cpu_has_apic)
return;
rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1483,13 @@ void x2apic_setup(void)
static __init void x2apic_disable(void)
{
u32 x2apic_id;
u32 x2apic_id, state = x2apic_state;
if (x2apic_state != X2APIC_ON)
goto out;
x2apic_mode = 0;
x2apic_state = X2APIC_DISABLED;
if (state != X2APIC_ON)
return;
x2apic_id = read_apic_id();
if (x2apic_id >= 255)
@@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void)
__x2apic_disable();
register_lapic_address(mp_lapic_addr);
out:
x2apic_state = X2APIC_DISABLED;
x2apic_mode = 0;
}
static __init void x2apic_enable(void)

@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
err = assign_irq_vector_policy(virq, irq_data->node, data,
err = assign_irq_vector_policy(virq + i, irq_data->node, data,
info);
if (err)
goto error;

@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (src_fpu->fpstate_active)
if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0;

@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
write_cr0(cr0);
/* Flush out any pending x87 state: */
asm volatile ("fninit");
#ifdef CONFIG_MATH_EMULATION
if (!cpu_has_fpu)
fpstate_init_soft(&current->thread.fpu.state.soft);
else
#endif
asm volatile ("fninit");
}
/*

@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}

@@ -8,7 +8,7 @@ config XEN
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_TSC
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y

@@ -702,11 +702,11 @@ static ssize_t flags_show(struct device *dev,
u16 flags = to_nfit_memdev(dev)->flags;
return sprintf(buf, "%s%s%s%s%s\n",
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);
@@ -849,12 +849,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
nvdimm_name(nvdimm),
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
}
@@ -1024,7 +1024,7 @@ static void wmb_blk(struct nfit_blk *nfit_blk)
wmb_pmem();
}
static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -1032,7 +1032,7 @@ static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
return readq(mmio->base + offset);
return readl(mmio->base + offset);
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,

@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
/* This uses a workqueue to avoid various locking ordering issues */
static void acpi_video_backlight_notify_work(struct work_struct *work)
{
if (acpi_video_get_backlight_type() != acpi_backlight_video)
acpi_video_unregister_backlight();
}
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
val == BACKLIGHT_REGISTERED &&
acpi_video_get_backlight_type() != acpi_backlight_video)
acpi_video_unregister_backlight();
val == BACKLIGHT_REGISTERED)
schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
INIT_WORK(&backlight_notify_work,
acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)
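
A note on the hunk above: per its own comment, the unregister call moves out of the backlight notifier into a work item because calling back into the backlight core from its notifier chain risks lock-ordering deadlocks. A hypothetical minimal module showing the defer-to-workqueue shape (not the driver's actual code):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *work)
    {
        pr_info("heavy work runs here, outside the notifier's locks\n");
    }

    static int __init demo_init(void)
    {
        INIT_WORK(&demo_work, demo_work_fn);
        schedule_work(&demo_work);     /* all the notifier itself does */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_work_sync(&demo_work);  /* never unload with work pending */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");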

@@ -4230,6 +4230,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },

@@ -3756,6 +3756,14 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
/*
* For flush requests, request_idx starts at the end of the
* tag space. Since we don't support FLUSH/FUA, simply return
* 0 as there's nothing to be done.
*/
if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
return 0;
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
if (!cmd->command)

@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
BUG_ON(!imxtm->base);
imxtm->type = type;
imxtm->irq = irq;
_mxc_timer_init(imxtm);
}

@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
ret = exynos5250_cpufreq_init(exynos_info);
} else {
pr_err("%s: Unknown SoC type\n", __func__);
return -ENODEV;
ret = -ENODEV;
}
if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (exynos_info->set_freq == NULL) {
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
ret = -EINVAL;
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
ret = -EINVAL;
goto err_vdd_arm;
}
@@ -225,7 +227,7 @@ err_cpufreq_reg:
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
return -EINVAL;
return ret;
}
static struct platform_driver exynos_cpufreq_platdrv = {
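
A note on the exynos_cpufreq_probe hunks above: they restore the usual goto-cleanup discipline, where every failure path sets ret, jumps to a label that releases exactly what was already acquired, and the function returns ret instead of a hard-coded -EINVAL. A user-space sketch of the convention (allocations and error values are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    static int probe(void)
    {
        int ret;
        char *info = malloc(32);      /* stand-in for exynos_info */
        if (!info)
            return -12;               /* -ENOMEM */

        char *reg = malloc(32);       /* stand-in for regulator_get() */
        if (!reg) {
            ret = -22;                /* -EINVAL */
            goto err_info;            /* free only what exists so far */
        }

        free(reg);
        free(info);
        return 0;

    err_info:
        free(info);
        return ret;                   /* the fix: return ret, not -EINVAL */
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }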

@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
MODULE_LICENSE("GPLv2");
MODULE_LICENSE("GPL v2");

@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);

@@ -1075,34 +1075,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
const union child_device_config *p_child;
union child_device_config *child_dev_ptr;
int i, child_device_num, count;
u8 expected_size;
u16 block_size;
u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
if (bdb->version < 195) {
expected_size = 33;
} else if (bdb->version == 195) {
expected_size = 37;
} else if (bdb->version <= 197) {
expected_size = 38;
} else {
expected_size = 38;
DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
expected_size, bdb->version);
}
if (expected_size > sizeof(*p_child)) {
DRM_ERROR("child_device_config cannot fit in p_child\n");
return;
}
if (p_defs->child_dev_size != expected_size) {
DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
p_defs->child_dev_size, expected_size, bdb->version);
if (p_defs->child_dev_size < sizeof(*p_child)) {
DRM_ERROR("General definiton block child device size is too small.\n");
return;
}
/* get the block size of general definitions */
@@ -1149,7 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
memcpy(child_dev_ptr, p_child, sizeof(*p_child));
}
return;
}
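
A note on the parse_device_mapping hunk above: it replaces the per-version expected-size table with a simpler invariant, accept any child_dev_size at least as large as the structure the driver understands and copy only that much, so a newer, larger BDB record can never overrun the destination. A user-space sketch of the defensive copy (sizes and names are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct child_config { unsigned char bytes[33]; };  /* known layout */

    static int copy_record(struct child_config *dst,
                           const void *src, size_t src_record_size)
    {
        if (src_record_size < sizeof(*dst))
            return -1;                /* record too small, reject */

        /* The source record may be larger (newer firmware revision);
         * copy only the prefix we understand. */
        memcpy(dst, src, sizeof(*dst));
        return 0;
    }

    int main(void)
    {
        unsigned char blob[64] = { 0x42 };   /* fake 64-byte record */
        struct child_config cfg;

        if (copy_record(&cfg, blob, sizeof(blob)) == 0)
            printf("first byte: %#x\n", cfg.bytes[0]);
        return 0;
    }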

@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
243000, 270000, 324000, 405000,
420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
/* WaDisableHBR2:skl */
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
return false;
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
(INTEL_INFO(dev)->gen >= 9))
return true;
else
return false;
}
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
return ARRAY_SIZE(skl_rates);
} else if (IS_CHERRYVIEW(dev)) {
*source_rates = chv_rates;
return ARRAY_SIZE(chv_rates);
}
*source_rates = default_rates;
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
/* WaDisableHBR2:skl */
return (DP_LINK_BW_2_7 >> 3) + 1;
else if (INTEL_INFO(dev)->gen >= 8 ||
(IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
/* This depends on the fact that 5.4 is last value in the array */
if (intel_dp_source_supports_hbr2(dev))
return (DP_LINK_BW_5_4 >> 3) + 1;
else
return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
}
/* Training Pattern 3 support, both source and sink */
/* Training Pattern 3 support, Intel platforms that support HBR2 alone
* have support for TP3 hence that check is used along with dpcd check
* to ensure TP3 can be enabled.
* SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
* supported but still not enabled.
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
(IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else

@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
ctx_obj->dirty = true;
}
return ret;

@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
/* we can race here at startup, some boards seem to trigger
* hotplug irqs when they shouldn't. */
if (!rdev->mode_info.mode_config_initialized)
return;
mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)

@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
* convert it to descriptor.
*/
if (!button->gpiod && gpio_is_valid(button->gpio)) {
unsigned flags = 0;
unsigned flags = GPIOF_IN;
if (button->active_low)
flags |= GPIOF_ACTIVE_LOW;

@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_wake = irq_chip_set_wake_parent,
.irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif

@@ -240,7 +240,7 @@ config DVB_SI21XX
config DVB_TS2020
tristate "Montage Tehnology TS2020 based tuners"
depends on DVB_CORE
depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help

@@ -2,6 +2,7 @@ config VIDEO_COBALT
tristate "Cisco Cobalt support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
depends on SND
select I2C_ALGOBIT
select VIDEO_ADV7604
select VIDEO_ADV7511

@@ -139,7 +139,7 @@ done:
also know about dropped frames. */
cb->vb.v4l2_buf.sequence = s->sequence++;
vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
}
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)

@@ -130,10 +130,11 @@ err:
int mantis_dma_init(struct mantis_pci *mantis)
{
int err = 0;
int err;
dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
if (mantis_alloc_buffers(mantis) < 0) {
err = mantis_alloc_buffers(mantis);
if (err < 0) {
dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
/* Stop RISC Engine */

@@ -184,125 +184,9 @@ out:
return -EINVAL;
}
static struct ir_raw_timings_manchester ir_rc5_timings = {
.leader = RC5_UNIT,
.pulse_space_start = 0,
.clock = RC5_UNIT,
.trailer_space = RC5_UNIT * 10,
};
static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
{
.leader = RC5_UNIT,
.pulse_space_start = 0,
.clock = RC5_UNIT,
.trailer_space = RC5X_SPACE,
},
{
.clock = RC5_UNIT,
.trailer_space = RC5_UNIT * 10,
},
};
static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
.leader = RC5_UNIT,
.pulse_space_start = 0,
.clock = RC5_UNIT,
.trailer_space = RC5_UNIT * 10,
};
static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
unsigned int important_bits)
{
/* all important bits of scancode should be set in mask */
if (~scancode->mask & important_bits)
return -EINVAL;
/* extra bits in mask should be zero in data */
if (scancode->mask & scancode->data & ~important_bits)
return -EINVAL;
return 0;
}
/**
* ir_rc5_encode() - Encode a scancode as a stream of raw events
*
* @protocols: allowed protocols
* @scancode: scancode filter describing scancode (helps distinguish between
* protocol subtypes when scancode is ambiguous)
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid.
*/
static int ir_rc5_encode(u64 protocols,
const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max)
{
int ret;
struct ir_raw_event *e = events;
unsigned int data, xdata, command, commandx, system;
/* Detect protocol and convert scancode to raw data */
if (protocols & RC_BIT_RC5 &&
!ir_rc5_validate_filter(scancode, 0x1f7f)) {
/* decode scancode */
command = (scancode->data & 0x003f) >> 0;
commandx = (scancode->data & 0x0040) >> 6;
system = (scancode->data & 0x1f00) >> 8;
/* encode data */
data = !commandx << 12 | system << 6 | command;
/* Modulate the data */
ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
data);
if (ret < 0)
return ret;
} else if (protocols & RC_BIT_RC5X &&
!ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
/* decode scancode */
xdata = (scancode->data & 0x00003f) >> 0;
command = (scancode->data & 0x003f00) >> 8;
commandx = (scancode->data & 0x004000) >> 14;
system = (scancode->data & 0x1f0000) >> 16;
/* commandx and system overlap, bits must match when encoded */
if (commandx == (system & 0x1))
return -EINVAL;
/* encode data */
data = 1 << 18 | system << 12 | command << 6 | xdata;
/* Modulate the data */
ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
CHECK_RC5X_NBITS,
data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
if (ret < 0)
return ret;
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc5x_timings[1],
RC5X_NBITS - CHECK_RC5X_NBITS,
data);
if (ret < 0)
return ret;
} else if (protocols & RC_BIT_RC5_SZ &&
!ir_rc5_validate_filter(scancode, 0x2fff)) {
/* RC5-SZ scancode is raw enough for Manchester as it is */
ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
RC5_SZ_NBITS, scancode->data & 0x2fff);
if (ret < 0)
return ret;
} else {
return -EINVAL;
}
return e - events;
}
static struct ir_raw_handler rc5_handler = {
.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
.decode = ir_rc5_decode,
.encode = ir_rc5_encode,
};
static int __init ir_rc5_decode_init(void)

@@ -291,133 +291,11 @@ out:
return -EINVAL;
}
static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
{
.leader = RC6_PREFIX_PULSE,
.pulse_space_start = 0,
.clock = RC6_UNIT,
.invert = 1,
.trailer_space = RC6_PREFIX_SPACE,
},
{
.clock = RC6_UNIT,
.invert = 1,
},
{
.clock = RC6_UNIT * 2,
.invert = 1,
},
{
.clock = RC6_UNIT,
.invert = 1,
.trailer_space = RC6_SUFFIX_SPACE,
},
};
static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
unsigned int important_bits)
{
/* all important bits of scancode should be set in mask */
if (~scancode->mask & important_bits)
return -EINVAL;
/* extra bits in mask should be zero in data */
if (scancode->mask & scancode->data & ~important_bits)
return -EINVAL;
return 0;
}
/**
* ir_rc6_encode() - Encode a scancode as a stream of raw events
*
* @protocols: allowed protocols
* @scancode: scancode filter describing scancode (helps distinguish between
* protocol subtypes when scancode is ambiguous)
* @events: array of raw ir events to write into
* @max: maximum size of @events
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid.
*/
static int ir_rc6_encode(u64 protocols,
const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max)
{
int ret;
struct ir_raw_event *e = events;
if (protocols & RC_BIT_RC6_0 &&
!ir_rc6_validate_filter(scancode, 0xffff)) {
/* Modulate the preamble */
ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
if (ret < 0)
return ret;
/* Modulate the header (Start Bit & Mode-0) */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[1],
RC6_HEADER_NBITS, (1 << 3));
if (ret < 0)
return ret;
/* Modulate Trailer Bit */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[2], 1, 0);
if (ret < 0)
return ret;
/* Modulate rest of the data */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[3], RC6_0_NBITS,
scancode->data);
if (ret < 0)
return ret;
} else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
!ir_rc6_validate_filter(scancode, 0x8fffffff)) {
/* Modulate the preamble */
ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
if (ret < 0)
return ret;
/* Modulate the header (Start Bit & Header-version 6 */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[1],
RC6_HEADER_NBITS, (1 << 3 | 6));
if (ret < 0)
return ret;
/* Modulate Trailer Bit */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[2], 1, 0);
if (ret < 0)
return ret;
/* Modulate rest of the data */
ret = ir_raw_gen_manchester(&e, max - (e - events),
&ir_rc6_timings[3],
fls(scancode->mask),
scancode->data);
if (ret < 0)
return ret;
} else {
return -EINVAL;
}
return e - events;
}
static struct ir_raw_handler rc6_handler = {
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
.encode = ir_rc6_encode,
};
static int __init ir_rc6_decode_init(void)

@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
static int nvt_write_wakeup_codes(struct rc_dev *dev,
const u8 *wakeup_sample_buf, int count)
{
int i = 0;
u8 reg, reg_learn_mode;
unsigned long flags;
struct nvt_dev *nvt = dev->priv;
nvt_dbg_wake("writing wakeup samples");
reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
/* Lock the learn area to prevent racing with wake-isr */
spin_lock_irqsave(&nvt->nvt_lock, flags);
/* Enable fifo writes */
nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
/* Clear cir wake rx fifo */
nvt_clear_cir_wake_fifo(nvt);
if (count > WAKE_FIFO_LEN) {
nvt_dbg_wake("HW FIFO too small for all wake samples");
count = WAKE_FIFO_LEN;
}
if (count)
pr_info("Wake samples (%d) =", count);
else
pr_info("Wake sample fifo cleared");
/* Write wake samples to fifo */
for (i = 0; i < count; i++) {
pr_cont(" %02x", wakeup_sample_buf[i]);
nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
CIR_WAKE_WR_FIFO_DATA);
}
pr_cont("\n");
/* Switch cir to wakeup mode and disable fifo writing */
nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
/* Set number of bytes needed for wake */
nvt_cir_wake_reg_write(nvt, count ? count :
CIR_WAKE_FIFO_CMP_BYTES,
CIR_WAKE_FIFO_CMP_DEEP);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
return 0;
}
static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc_filter)
{
u8 *reg_buf;
u8 buf_val;
int i, ret, count;
unsigned int val;
struct ir_raw_event *raw;
bool complete;
/* Require both mask and data to be set before actually committing */
if (!sc_filter->mask || !sc_filter->data)
return 0;
raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
if (!raw)
return -ENOMEM;
ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
raw, WAKE_FIFO_LEN);
complete = (ret != -ENOBUFS);
if (!complete)
ret = WAKE_FIFO_LEN;
else if (ret < 0)
goto out_raw;
reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
if (!reg_buf) {
ret = -ENOMEM;
goto out_raw;
}
/* Inspect the ir samples */
for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
/* Split too large values into several smaller ones */
while (val > 0 && count < WAKE_FIFO_LEN) {
/* Skip last value for better comparison tolerance */
if (complete && i == ret - 1 && val < BUF_LEN_MASK)
break;
/* Clamp values to BUF_LEN_MASK at most */
buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
reg_buf[count] = buf_val;
val -= buf_val;
if ((raw[i]).pulse)
reg_buf[count] |= BUF_PULSE_BIT;
count++;
}
}
ret = nvt_write_wakeup_codes(dev, reg_buf, count);
kfree(reg_buf);
out_raw:
kfree(raw);
return ret;
}
/* Dummy implementation. nuvoton is agnostic to the protocol used */
static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
u64 *rc_type)
{
return 0;
}
/*
* nvt_tx_ir
*
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
rdev->encode_wakeup = true;
rdev->allowed_protocols = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
rdev->s_tx_carrier = nvt_set_tx_carrier;
rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;

@@ -63,7 +63,6 @@ static int debug;
*/
#define TX_BUF_LEN 256
#define RX_BUF_LEN 32
#define WAKE_FIFO_LEN 67
struct nvt_dev {
struct pnp_dev *pdev;

@@ -25,8 +25,6 @@ struct ir_raw_handler {
u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max);
/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
/* functions for IR encoders */
static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
unsigned int pulse,
u32 duration)
{
init_ir_raw_event(ev);
ev->duration = duration;
ev->pulse = pulse;
}
/**
* struct ir_raw_timings_manchester - Manchester coding timings
* @leader: duration of leader pulse (if any) 0 if continuing
* existing signal (see @pulse_space_start)
* @pulse_space_start: 1 for starting with pulse (0 for starting with space)
* @clock: duration of each pulse/space in ns
* @invert: if set clock logic is inverted
* (0 = space + pulse, 1 = pulse + space)
* @trailer_space: duration of trailer space in ns
*/
struct ir_raw_timings_manchester {
unsigned int leader;
unsigned int pulse_space_start:1;
unsigned int clock;
unsigned int invert:1;
unsigned int trailer_space;
};
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
unsigned int n, unsigned int data);
/*
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
u64 ir_raw_get_encode_protocols(void);
int ir_raw_event_register(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);

@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
static u64 encode_protocols;
static int ir_raw_event_thread(void *data)
{
@@ -241,146 +240,12 @@
return protocols;
}
/* used internally by the sysfs interface */
u64
ir_raw_get_encode_protocols(void)
{
u64 protocols;
mutex_lock(&ir_raw_handler_lock);
protocols = encode_protocols;
mutex_unlock(&ir_raw_handler_lock);
return protocols;
}
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
/* the caller will update dev->enabled_protocols */
return 0;
}
/**
* ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
* @ev: Pointer to pointer to next free event. *@ev is incremented for
* each raw event filled.
* @max: Maximum number of raw events to fill.
* @timings: Manchester modulation timings.
* @n: Number of bits of data.
* @data: Data bits to encode.
*
* Encodes the @n least significant bits of @data using Manchester (bi-phase)
* modulation with the timing characteristics described by @timings, writing up
* to @max raw IR events using the *@ev pointer.
*
* Returns: 0 on success.
* -ENOBUFS if there isn't enough space in the array to fit the
* full encoded data. In this case all @max events will have been
* written.
*/
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
unsigned int n, unsigned int data)
{
bool need_pulse;
unsigned int i;
int ret = -ENOBUFS;
i = 1 << (n - 1);
if (timings->leader) {
if (!max--)
return ret;
if (timings->pulse_space_start) {
init_ir_raw_event_duration((*ev)++, 1, timings->leader);
if (!max--)
return ret;
init_ir_raw_event_duration((*ev), 0, timings->leader);
} else {
init_ir_raw_event_duration((*ev), 1, timings->leader);
}
i >>= 1;
} else {
/* continue existing signal */
--(*ev);
}
/* from here on *ev will point to the last event rather than the next */
while (n && i > 0) {
need_pulse = !(data & i);
if (timings->invert)
need_pulse = !need_pulse;
if (need_pulse == !!(*ev)->pulse) {
(*ev)->duration += timings->clock;
} else {
if (!max--)
goto nobufs;
init_ir_raw_event_duration(++(*ev), need_pulse,
timings->clock);
}
if (!max--)
goto nobufs;
init_ir_raw_event_duration(++(*ev), !need_pulse,
timings->clock);
i >>= 1;
}
if (timings->trailer_space) {
if (!(*ev)->pulse)
(*ev)->duration += timings->trailer_space;
else if (!max--)
goto nobufs;
else
init_ir_raw_event_duration(++(*ev), 0,
timings->trailer_space);
}
ret = 0;
nobufs:
/* point to the next event rather than last event before returning */
++(*ev);
return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
/**
* ir_raw_encode_scancode() - Encode a scancode as raw events
*
* @protocols: permitted protocols
* @scancode: scancode filter describing a single scancode
* @events: array of raw events to write into
* @max: max number of raw events
*
* Attempts to encode the scancode as raw events.
*
* Returns: The number of events written.
* -ENOBUFS if there isn't enough space in the array to fit the
* encoding. In this case all @max events will have been written.
* -EINVAL if the scancode is ambiguous or invalid, or if no
* compatible encoder was found.
*/
int ir_raw_encode_scancode(u64 protocols,
const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max)
{
struct ir_raw_handler *handler;
int ret = -EINVAL;
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list) {
if (handler->protocols & protocols && handler->encode) {
ret = handler->encode(protocols, scancode, events, max);
if (ret >= 0 || ret == -ENOBUFS)
break;
}
}
mutex_unlock(&ir_raw_handler_lock);
return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
/*
* Used to (un)register raw event clients
*/
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
available_protocols |= ir_raw_handler->protocols;
if (ir_raw_handler->encode)
encode_protocols |= ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->dev);
available_protocols &= ~ir_raw_handler->protocols;
if (ir_raw_handler->encode)
encode_protocols &= ~ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
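
For context on what this revert removes: ir_raw_gen_manchester() implemented Manchester (bi-phase) coding, in which every bit is carried by a mid-bit transition rather than a level, making the signal self-clocking. A toy C sketch of the idea (half-bit conventions vary by protocol; this one uses the IEEE 802.3 convention):

    #include <stdio.h>

    /* Print the half-bit pair for each of the nbits low bits of data,
     * MSB first. IEEE 802.3: 0 = high->low, 1 = low->high. */
    static void manchester_print(unsigned int data, unsigned int nbits)
    {
        for (unsigned int i = 1u << (nbits - 1); i != 0; i >>= 1) {
            int bit = !!(data & i);
            printf("%d -> %s\n", bit, bit ? "low,high" : "high,low");
        }
    }

    int main(void)
    {
        manchester_print(0x5, 4);   /* encode binary 0101 */
        return 0;
    }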

@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
return 0;
}
static int loop_set_wakeup_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc_filter)
{
static const unsigned int max = 512;
struct ir_raw_event *raw;
int ret;
int i;
/* fine to disable filter */
if (!sc_filter->mask)
return 0;
/* encode the specified filter and loop it back */
raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
raw, max);
/* still loop back the partial raw IR even if it's incomplete */
if (ret == -ENOBUFS)
ret = max;
if (ret >= 0) {
/* do the loopback */
for (i = 0; i < ret; ++i)
ir_raw_event_store(dev, &raw[i]);
ir_raw_event_handle(dev);
ret = 0;
}
kfree(raw);
return ret;
}
static int __init loop_init(void)
{
struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->encode_wakeup = true;
rc->allowed_protocols = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;

@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
} else {
enabled = dev->enabled_wakeup_protocols;
allowed = dev->allowed_wakeup_protocols;
if (dev->encode_wakeup && !allowed)
allowed = ir_raw_get_encode_protocols();
}
mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
path ? path : "N/A");
kfree(path);
if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* Load raw decoders, if they aren't already */
if (!raw_init) {
IR_dprintk(1, "Loading raw decoders\n");
ir_raw_init();
raw_init = true;
}
}
if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* calls ir_register_device so unlock mutex here*/
mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);

@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
case VB2_BUF_STATE_REQUEUEING:
/* nothing */
break;
}
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
state != VB2_BUF_STATE_QUEUED))
state != VB2_BUF_STATE_QUEUED &&
state != VB2_BUF_STATE_REQUEUEING))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
if (state != VB2_BUF_STATE_QUEUED)
if (state == VB2_BUF_STATE_QUEUED ||
state == VB2_BUF_STATE_REQUEUEING) {
vb->state = VB2_BUF_STATE_QUEUED;
} else {
/* Add the buffer to the done buffers list */
list_add_tail(&vb->done_entry, &q->done_list);
vb->state = state;
}
atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
if (state == VB2_BUF_STATE_QUEUED) {
switch (state) {
case VB2_BUF_STATE_QUEUED:
return;
case VB2_BUF_STATE_REQUEUEING:
if (q->start_streaming_called)
__enqueue_in_driver(vb);
return;
default:
/* Inform any processes that may be waiting for buffers */
wake_up(&q->done_wq);
break;
}
/* Inform any processes that may be waiting for buffers */
wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
static bool __check_once __read_mostly;
static bool check_once;
if (__check_once)
if (check_once)
return;
__check_once = true;
__WARN();
check_once = true;
WARN_ON(1);
pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
else
pr_warn_once("use the actual size instead.\n");
pr_warn("use the actual size instead.\n");
}
/**

@@ -854,6 +854,18 @@ static int pcan_usb_probe(struct usb_interface *intf)
/*
* describe the PCAN-USB adapter
*/
static const struct can_bittiming_const pcan_usb_const = {
.name = "pcan_usb",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 64,
.brp_inc = 1,
};
const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
@@ -862,17 +874,7 @@ const struct peak_usb_adapter pcan_usb = {
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
.bittiming_const = {
.name = "pcan_usb",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 64,
.brp_inc = 1,
},
.bittiming_const = &pcan_usb_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb),

@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;

@@ -48,8 +48,8 @@ struct peak_usb_adapter {
u32 device_id;
u32 ctrlmode_supported;
struct can_clock clock;
const struct can_bittiming_const bittiming_const;
const struct can_bittiming_const data_bittiming_const;
const struct can_bittiming_const * const bittiming_const;
const struct can_bittiming_const * const data_bittiming_const;
unsigned int ctrl_count;
int (*intf_probe)(struct usb_interface *intf);
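
A note on the header hunk above, which drives the surrounding peak_usb changes: turning the embedded const structs into pointers lets several adapter definitions share one static timing table, and lets adapters without CAN FD leave data_bittiming_const NULL instead of carrying a zeroed dummy. A small user-space sketch of the trade-off (names are illustrative):

    #include <stdio.h>

    struct bittiming_const { const char *name; int tseg1_max; };

    static const struct bittiming_const shared_nominal = {
        .name = "demo", .tseg1_max = 16,
    };

    struct adapter {
        const char *name;
        const struct bittiming_const *nominal;   /* shared table */
        const struct bittiming_const *data;      /* NULL: no CAN FD */
    };

    static const struct adapter usb_adapter = {
        .name = "demo-usb", .nominal = &shared_nominal, .data = NULL,
    };

    int main(void)
    {
        printf("%s: tseg1_max=%d, fd=%s\n", usb_adapter.name,
               usb_adapter.nominal->tseg1_max,
               usb_adapter.data ? "yes" : "no");
        return 0;
    }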

@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
}
/* describes the PCAN-USB FD adapter */
static const struct can_bittiming_const pcan_usb_fd_const = {
.name = "pcan_usb_fd",
.tseg1_min = 1,
.tseg1_max = 64,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
};
static const struct can_bittiming_const pcan_usb_fd_data_const = {
.name = "pcan_usb_fd",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
};
const struct peak_usb_adapter pcan_usb_fd = {
.name = "PCAN-USB FD",
.device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
.bittiming_const = {
.name = "pcan_usb_fd",
.tseg1_min = 1,
.tseg1_max = 64,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
},
.data_bittiming_const = {
.name = "pcan_usb_fd",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
},
.bittiming_const = &pcan_usb_fd_const,
.data_bittiming_const = &pcan_usb_fd_data_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = {
};
/* describes the PCAN-USB Pro FD adapter */
static const struct can_bittiming_const pcan_usb_pro_fd_const = {
.name = "pcan_usb_pro_fd",
.tseg1_min = 1,
.tseg1_max = 64,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
};
static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
.name = "pcan_usb_pro_fd",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
};
const struct peak_usb_adapter pcan_usb_pro_fd = {
.name = "PCAN-USB Pro FD",
.device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
.bittiming_const = {
.name = "pcan_usb_pro_fd",
.tseg1_min = 1,
.tseg1_max = 64,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
},
.data_bittiming_const = {
.name = "pcan_usb_pro_fd",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
},
.bittiming_const = &pcan_usb_pro_fd_const,
.data_bittiming_const = &pcan_usb_pro_fd_data_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),

@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
/*
* describe the PCAN-USB Pro adapter
*/
static const struct can_bittiming_const pcan_usb_pro_const = {
.name = "pcan_usb_pro",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
};
const struct peak_usb_adapter pcan_usb_pro = {
.name = "PCAN-USB Pro",
.device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = {
.clock = {
.freq = PCAN_USBPRO_CRYSTAL_HZ,
},
.bittiming_const = {
.name = "pcan_usb_pro",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
},
.bittiming_const = &pcan_usb_pro_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),

@@ -65,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/

@@ -801,6 +801,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
if (pdata->phy_dev)
phy_disconnect(pdata->phy_dev);
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;

@@ -1277,9 +1277,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
xgene_enet_mdio_remove(pdata);
xgene_enet_delete_desc_rings(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);

@@ -2126,6 +2126,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
int ret = 0;
int timeout = 0;
u32 reg;
u32 dma_ctrl;
int i;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2169,6 +2171,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
ret = -ETIMEDOUT;
}
dma_ctrl = 0;
for (i = 0; i < priv->hw_params->rx_queues; i++)
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
dma_ctrl = 0;
for (i = 0; i < priv->hw_params->tx_queues; i++)
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
return ret;
}
@@ -2820,8 +2836,6 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
bcmgenet_disable_tx_napi(priv);
for (q = 0; q < priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2837,8 +2851,6 @@ static void bcmgenet_timeout(struct net_device *dev)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_enable_tx_napi(priv);
dev->trans_start = jiffies;
dev->stats.tx_errors++;

@@ -5174,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
struct device *dev = &adapter->pdev->dev;
int status;
if (lancer_chip(adapter) || BEx_chip(adapter))
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5221,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
{
struct be_adapter *adapter = netdev_priv(netdev);
if (lancer_chip(adapter) || BEx_chip(adapter))
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)

@@ -1778,7 +1778,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
reinit_completion(&fep->mdio_done);
/* start a read op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1817,7 +1817,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return ret;
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
reinit_completion(&fep->mdio_done);
/* start a write op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
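
A note on the two FEC hunks above: they rest on the init_completion() vs reinit_completion() distinction. init_completion() sets up the completion's wait queue and must run once, while reinit_completion() only resets the done count, which makes it the safe per-transfer call even when a late interrupt from a timed-out transfer is still signalling the old state. A hypothetical minimal module sketching the split (not the driver's code):

    #include <linux/completion.h>
    #include <linux/module.h>

    static struct completion xfer_done;

    static void start_transfer(void)
    {
        /* Per operation: reset only the done count; the wait queue
         * initialized once in demo_init() stays valid even if a
         * stale IRQ is still completing the previous transfer. */
        reinit_completion(&xfer_done);
        /* ... kick hardware, then wait_for_completion_timeout() ... */
    }

    static int __init demo_init(void)
    {
        init_completion(&xfer_done);   /* once, at probe time */
        start_transfer();
        complete(&xfer_done);          /* what the IRQ handler would do */
        return 0;
    }

    static void __exit demo_exit(void) { }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");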

@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
/* force link state update after mac reset */
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
phy_start(priv->phydev);
enable_napi(priv);

@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
static inline bool fm10k_page_is_reserved(struct page *page)
{
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,

@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
static inline bool igb_page_is_reserved(struct page *page)
{
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,

@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
static inline bool ixgbe_page_is_reserved(struct page *page)
{
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**

@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
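
A note on the four identical hunks above (fm10k, igb, ixgbe, ixgbevf): they stop reading page->pfmemalloc directly and go through the page_is_pfmemalloc() accessor, since the "allocated from an emergency reserve" information shares storage with other struct page uses and only the helper knows the encoding. A user-space analogue of hiding an overloaded field behind a single accessor (encoding and names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct buf {
        unsigned long index;   /* doubles as a flag when == -1UL */
    };

    /* Single point of truth for the encoding; callers never test
     * b->index directly for this purpose. */
    static bool buf_is_emergency(const struct buf *b)
    {
        return b->index == -1UL;   /* sentinel set by the allocator */
    }

    int main(void)
    {
        struct buf b = { .index = -1UL };
        printf("emergency reserve: %s\n",
               buf_is_emergency(&b) ? "yes" : "no");
        return 0;
    }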

@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
err = dma_mapping_error(adapter->dev,
sg_dma_address(&tx_ctl->sg));
if (err) {
if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}

@@ -1282,7 +1282,12 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
}
}
if (core_stats) {
if (!core_stats)
return stats_count;
if (nic_data->datapath_caps &
1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
/* Use vadaptor stats. */
core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
stats[EF10_STAT_rx_multicast] +
stats[EF10_STAT_rx_broadcast];
@@ -1302,6 +1307,26 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
core_stats->rx_errors = core_stats->rx_crc_errors;
core_stats->tx_errors = stats[EF10_STAT_tx_bad];
} else {
/* Use port stats. */
core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
stats[GENERIC_STAT_rx_nodesc_trunc] +
stats[GENERIC_STAT_rx_noskb_drops];
core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
core_stats->rx_length_errors =
stats[EF10_STAT_port_rx_gtjumbo] +
stats[EF10_STAT_port_rx_length_error];
core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
core_stats->rx_frame_errors =
stats[EF10_STAT_port_rx_align_error];
core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
core_stats->rx_errors = (core_stats->rx_length_errors +
core_stats->rx_crc_errors +
core_stats->rx_frame_errors);
}
return stats_count;

@@ -290,6 +290,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return ERR_PTR(-EINVAL);
}
/* propagate the fixed link values to struct phy_device */
phy->link = status->link;
if (status->link) {
phy->speed = status->speed;
phy->duplex = status->duplex;
phy->pause = status->pause;
phy->asym_pause = status->asym_pause;
}
of_node_get(np);
phy->dev.of_node = np;

@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
int old_link;
mutex_lock(&phydev->lock);
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
/* Only register a CHANGE if we are
* polling or ignoring interrupts
/* Only register a CHANGE if we are polling or ignoring
* interrupts and link changed since latest checking.
*/
if (!phy_interrupt_is_valid(phydev))
phydev->state = PHY_CHANGELINK;
if (!phy_interrupt_is_valid(phydev)) {
old_link = phydev->link;
err = phy_read_status(phydev);
if (err)
break;
if (old_link != phydev->link)
phydev->state = PHY_CHANGELINK;
}
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
@@ -1030,10 +1038,14 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
int value = -1;
if (phydrv->read_mmd_indirect == NULL) {
mmd_phy_indirect(phydev->bus, prtad, devad, addr);
struct mii_bus *bus = phydev->bus;
mutex_lock(&bus->mdio_lock);
mmd_phy_indirect(bus, prtad, devad, addr);
/* Read the content of the MMD's selected register */
value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
value = bus->read(bus, addr, MII_MMD_DATA);
mutex_unlock(&bus->mdio_lock);
} else {
value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
}
@@ -1063,10 +1075,14 @@ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
struct phy_driver *phydrv = phydev->drv;
if (phydrv->write_mmd_indirect == NULL) {
mmd_phy_indirect(phydev->bus, prtad, devad, addr);
struct mii_bus *bus = phydev->bus;
mutex_lock(&bus->mdio_lock);
mmd_phy_indirect(bus, prtad, devad, addr);
/* Write the data into MMD's selected register */
phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
bus->write(bus, addr, MII_MMD_DATA, data);
mutex_unlock(&bus->mdio_lock);
} else {
phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
}
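
A note on the two MMD hunks above: they close the same race. An indirect MMD access is a multi-step bus sequence (select the device and register, then move the data), and the old code held no lock across the steps, so concurrent users could interleave and hit the wrong register. A pthread sketch of locking the whole sequence rather than each step (fake bus, illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;
    static int selected_reg;           /* shared bus state */

    static int indirect_read(int reg)
    {
        int val;

        pthread_mutex_lock(&mdio_lock);
        selected_reg = reg;            /* step 1: select register      */
        val = selected_reg + 1;        /* step 2: data transfer (fake) */
        pthread_mutex_unlock(&mdio_lock);
        return val;                    /* steps 1 and 2 were atomic    */
    }

    int main(void)
    {
        printf("read: %d\n", indirect_read(41));
        return 0;
    }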

@@ -176,7 +176,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
if (c45_ids)
dev->c45_ids = *c45_ids;
dev->bus = bus;
dev->dev.parent = bus->parent;
dev->dev.parent = &bus->dev;
dev->dev.bus = &mdio_bus_type;
dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);

@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
}
/*
* The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
* other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
* does send the pulses within this interval, the PHY will remained powered
* down.
*
* This workaround will manually toggle the PHY on/off upon calls to read_status
* in order to generate link test pulses if the link is down. If a link partner
* is present, it will respond to the pulses, which will cause the ENERGYON bit
* to be set and will cause the EDPD mode to be exited.
* The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
* plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
* unstable detection of plugging in Ethernet cable.
* This workaround disables Energy Detect Power-Down mode and waiting for
* response on link pulses to detect presence of plugged Ethernet cable.
* The Energy Detect Power-Down mode is enabled again in the end of procedure to
* save approximately 220 mW of power if cable is unplugged.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
int err = genphy_read_status(phydev);
int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
if (rc < 0)
return rc;
/* Sleep 64 ms to allow ~5 link test pulses to be sent */
msleep(64);
/* Wait max 640 ms to detect energy */
for (i = 0; i < 64; i++) {
/* Sleep to allow link test pulses to be sent */
msleep(10);
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;
if (rc & MII_LAN83C185_ENERGYON)
break;
}
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,


@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static struct ppp *ppp_create_interface(struct net *net, int unit,
struct file *file, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
if (file == ppp->owner)
ppp_shutdown_interface(ppp);
unregister_netdevice(ppp->dev);
rtnl_unlock();
}
if (atomic_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
if (file == ppp->owner)
ppp_shutdown_interface(ppp);
unregister_netdevice(ppp->dev);
rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
ppp = ppp_create_interface(net, unit, &err);
ppp = ppp_create_interface(net, unit, file, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
ppp->owner = file;
err = -EFAULT;
if (put_user(ppp->file.index, p))
break;
@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
struct ppp *ppp;
LIST_HEAD(list);
int id;
rtnl_lock();
idr_for_each_entry(&pn->units_idr, ppp, id)
unregister_netdevice_queue(ppp->dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
idr_destroy(&pn->units_idr);
}
@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
return 0;
}
static void ppp_dev_uninit(struct net_device *dev)
{
struct ppp *ppp = netdev_priv(dev);
struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
ppp_lock(ppp);
ppp->closing = 1;
ppp_unlock(ppp);
mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
mutex_unlock(&pn->all_ppp_mutex);
ppp->owner = NULL;
ppp->file.dead = 1;
wake_up_interruptible(&ppp->file.rwait);
}
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
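
Condensed, the shape of this fix is the standard ndo_uninit pattern — a hedged sketch with hypothetical names, not the ppp code itself: per-unit teardown moves out of a bespoke shutdown helper into .ndo_uninit, so every unregister path (ioctl, release, pernet exit) reaches it via unregister_netdevice*() under RTNL:

static void example_dev_uninit(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Invoked on every unregister path, with RTNL held */
	example_drop_unit(priv);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_uninit	= example_dev_uninit,	/* called by unregister_netdevice() */
	/* ... remaining ops ... */
};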
@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
static struct ppp *
ppp_create_interface(struct net *net, int unit, int *retp)
static struct ppp *ppp_create_interface(struct net *net, int unit,
struct file *file, int *retp)
{
struct ppp *ppp;
struct ppp_net *pn;
@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
ppp->owner = file;
for (i = 0; i < NUM_NP; ++i)
ppp->npmode[i] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
init_waitqueue_head(&pf->rwait);
}
/*
* Take down a ppp interface unit - called when the owning file
* (the one that created the unit) is closed or detached.
*/
static void ppp_shutdown_interface(struct ppp *ppp)
{
struct ppp_net *pn;
pn = ppp_pernet(ppp->ppp_net);
mutex_lock(&pn->all_ppp_mutex);
/* This will call dev_close() for us. */
ppp_lock(ppp);
if (!ppp->closing) {
ppp->closing = 1;
ppp_unlock(ppp);
unregister_netdev(ppp->dev);
unit_put(&pn->units_idr, ppp->file.index);
} else
ppp_unlock(ppp);
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
mutex_unlock(&pn->all_ppp_mutex);
}
/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs


@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */


@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
int retval, pm;
int retval, pm, mpn;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net)
usbnet_purge_paused_rxq(dev);
mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
/* deferred work (task, timer, softirq) must also stop.
* can't flush_scheduled_work() until we drop rtnl (later),
* else workers could deadlock; so make workers a NOP.
@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net)
if (!pm)
usb_autopm_put_interface(dev->intf);
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
if (info->manage_power && mpn)
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);


@ -2216,6 +2216,8 @@ static int vxlan_open(struct net_device *dev)
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
ret = vxlan_igmp_join(vxlan);
if (ret == -EADDRINUSE)
ret = 0;
if (ret) {
vxlan_sock_release(vs);
return ret;


@ -2,7 +2,7 @@
# PCI configuration
#
config PCI_BUS_ADDR_T_64BIT
def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
depends on PCI
config PCI_MSI


@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
parent = pci_upstream_bridge(pdev);
if (!parent->has_secondary_link)
/*
* Usually there's an upstream device (Root Port or Switch
* Downstream Port), but we can't assume one exists.
*/
if (parent && !parent->has_secondary_link)
pdev->has_secondary_link = 1;
}
}
@ -1103,7 +1108,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
static void pci_msi_setup_pci_dev(struct pci_dev *dev)
void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
/*
* Disable the MSI hardware to avoid screaming interrupts


@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.6.0.17"
#define DRV_VERSION "1.6.0.17a"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "


@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
spin_lock_irqsave(io_lock, flags);
/* initialize rest of io_req */
io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@ -571,7 +573,7 @@ out:
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* we will hold the io lock only if we issued the IO */
if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
if (io_lock_acquired)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);


@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
int err;
int err = 0;
err = blk_pre_runtime_suspend(sdev->request_queue);
if (err)
return err;
if (pm && pm->runtime_suspend)
if (pm && pm->runtime_suspend) {
err = blk_pre_runtime_suspend(sdev->request_queue);
if (err)
return err;
err = pm->runtime_suspend(dev);
blk_post_runtime_suspend(sdev->request_queue, err);
blk_post_runtime_suspend(sdev->request_queue, err);
}
return err;
}
@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
if (pm && pm->runtime_resume) {
blk_pre_runtime_resume(sdev->request_queue);
err = pm->runtime_resume(dev);
blk_post_runtime_resume(sdev->request_queue, err);
blk_post_runtime_resume(sdev->request_queue, err);
}
return err;
}


@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);


@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
mutex_unlock(&g_tf_lock);
/*
* Wait for any outstanding fabric se_deve_entry->rcu_head
* callbacks to complete post kfree_rcu(), before allowing
* fabric driver unload of TFO->module to proceed.
*/
rcu_barrier();
kfree(t);
break;
return;
}
}
mutex_unlock(&g_tf_lock);


@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
mutex_unlock(&backend_mutex);
/*
* Wait for any outstanding backend driver ->rcu_head
* callbacks to complete post TBO->free_device() ->
* call_rcu(), before allowing backend driver module
* unload of target_backend_ops->owner to proceed.
*/
rcu_barrier();
kfree(tb);
break;
return;
}
}
mutex_unlock(&backend_mutex);
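
Both this hunk and the fabric one above follow the same unregister idiom; a minimal sketch with hypothetical names: drop the entry from the list under the lock, release the lock, then wait out in-flight RCU callbacks before freeing:

static void example_unregister(struct example_entry *e)
{
	mutex_lock(&example_lock);
	list_del(&e->list);
	mutex_unlock(&example_lock);

	/* Wait for outstanding call_rcu()/kfree_rcu() callbacks so none
	 * can run after the owning module (or the entry itself) is gone. */
	rcu_barrier();
	kfree(e);
}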


@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
if (cmd->data_length < 16) {
pr_warn("REPORT LUNS allocation length %u too small\n",
cmd->data_length);
return TCM_INVALID_CDB_FIELD;
}
__be32 len;
buf = transport_kmap_data_sg(cmd);
if (!buf)
if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!sess) {
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
if (!sess)
goto done;
}
nacl = sess->se_node_acl;
rcu_read_lock();
@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC2-R20 7.19.
*/
lun_count++;
if ((offset + 8) > cmd->data_length)
if (offset >= cmd->data_length)
continue;
int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
int_to_scsilun(deve->mapped_lun, &slun);
memcpy(buf + offset, &slun,
min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC3 r07, page 159.
*/
done:
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
buf[2] = ((lun_count >> 8) & 0xff);
buf[3] = (lun_count & 0xff);
transport_kunmap_data_sg(cmd);
/*
* If no LUNs are accessible, report virtual LUN 0.
*/
if (lun_count == 0) {
int_to_scsilun(0, &slun);
if (cmd->data_length > 8)
memcpy(buf + offset, &slun,
min(8u, cmd->data_length - offset));
lun_count = 1;
}
if (buf) {
len = cpu_to_be32(lun_count * 8);
memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
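
To make the truncation arithmetic concrete, a worked example assuming the SPC-3 REPORT LUNS layout (an 8-byte header whose first 4 bytes carry the big-endian LUN list length, followed by 8 bytes per LUN):

/*
 * With two mapped LUNs: lun_count = 2, the header length field is
 * 2 * 8 = 16, and the complete response is 8 + 16 = 24 bytes.  If the
 * initiator's allocation length is only 16, the min() copies above
 * write the header plus the first LUN entry and drop the rest, while
 * target_complete_cmd_with_length() still reports the full 24 bytes
 * so the initiator can retry with a larger buffer.
 */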


@ -68,7 +68,7 @@ struct power_table {
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int cpufreq_val;
unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
static unsigned int cpufreq_dev_count;
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
mutex_lock(&cooling_cpufreq_lock);
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
mutex_unlock(&cooling_cpufreq_lock);
mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
mutex_unlock(&cooling_cpufreq_lock);
mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
unsigned long max_freq = 0;
unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
switch (event) {
case CPUFREQ_ADJUST:
mutex_lock(&cooling_cpufreq_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (!cpumask_test_cpu(policy->cpu,
&cpufreq_dev->allowed_cpus))
continue;
max_freq = cpufreq_dev->cpufreq_val;
if (policy->max != max_freq)
cpufreq_verify_within_limits(policy, 0,
max_freq);
}
mutex_unlock(&cooling_cpufreq_lock);
break;
default:
if (event != CPUFREQ_ADJUST)
return NOTIFY_DONE;
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
continue;
/*
* policy->max is the maximum allowed frequency defined by user
* and clipped_freq is the maximum that thermal constraints
* allow.
*
* If clipped_freq is lower than policy->max, then we need to
* readjust policy->max.
*
* But, if clipped_freq is greater than policy->max, we don't
* need to do anything.
*/
clipped_freq = cpufreq_dev->clipped_freq;
if (policy->max > clipped_freq)
cpufreq_verify_within_limits(policy, 0, clipped_freq);
break;
}
mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
cpufreq_device->cpufreq_val = clip_freq;
cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
mutex_lock(&cooling_list_lock);
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
if (list_empty(&cpufreq_dev_list))
if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
return;
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
if (list_empty(&cpufreq_dev_list))
mutex_lock(&cooling_cpufreq_lock);
if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
mutex_lock(&cooling_list_lock);
list_del(&cpufreq_dev->node);
mutex_unlock(&cooling_list_lock);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);


@ -258,8 +258,7 @@ static int allocate_power(struct thermal_zone_device *tz,
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
req_power = devm_kcalloc(&tz->device, num_actors * 5,
sizeof(*req_power), GFP_KERNEL);
req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
if (!req_power) {
ret = -ENOMEM;
goto unlock;
@ -334,7 +333,7 @@ static int allocate_power(struct thermal_zone_device *tz,
max_allocatable_power, current_temp,
(s32)control_temp - (s32)current_temp);
devm_kfree(&tz->device, req_power);
kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
@ -426,7 +425,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -EINVAL;
}
params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@ -468,14 +467,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free:
devm_kfree(&tz->device, params);
kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
devm_kfree(&tz->device, tz->governor_data);
kfree(tz->governor_data);
tz->governor_data = NULL;
}


@ -844,14 +844,15 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
struct wb_iter iter;
might_sleep();
if (!bdi_has_dirty_io(bdi))
return;
restart:
rcu_read_lock();
bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
if (!wb_has_dirty_io(wb) ||
(skip_if_busy && writeback_in_progress(wb)))
/* SYNC_ALL writes out I_DIRTY_TIME too */
if (!wb_has_dirty_io(wb) &&
(base_work->sync_mode == WB_SYNC_NONE ||
list_empty(&wb->b_dirty_time)))
continue;
if (skip_if_busy && writeback_in_progress(wb))
continue;
base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
@ -899,8 +900,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
{
might_sleep();
if (bdi_has_dirty_io(bdi) &&
(!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
base_work->auto_free = 0;
base_work->single_wait = 0;
base_work->single_done = 0;
@ -2275,8 +2275,12 @@ void sync_inodes_sb(struct super_block *sb)
};
struct backing_dev_info *bdi = sb->s_bdi;
/* Nothing to do? */
if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
/*
* Can't skip on !bdi_has_dirty() because we should wait for !dirty
* inodes under writeback, and because I_DIRTY_TIME inodes ignored by
* bdi_has_dirty() need to be written out too.
*/
if (bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));


@ -347,6 +347,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
}
/**
* drm_eld_sad - Get ELD SAD structures.
* @eld: pointer to an eld memory structure with sad_count set
*/
static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
{
unsigned int ver, mnl;
ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
if (ver != 2 && ver != 31)
return NULL;
mnl = drm_eld_mnl(eld);
if (mnl > 16)
return NULL;
return eld + DRM_ELD_CEA_SAD(mnl, 0);
}
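
A hedged usage sketch for the new helper (the 3-byte SAD layout is from CEA-861; the iteration is illustrative, not part of this patch):

const uint8_t *sad = drm_eld_sad(eld);

if (sad) {
	int i, count = drm_eld_sad_count(eld);

	for (i = 0; i < count; i++, sad += 3) {
		uint8_t format = (sad[0] & 0x78) >> 3;	/* CEA-861 audio format code */
		uint8_t channels = (sad[0] & 0x07) + 1;	/* max channel count */

		/* ... match format/channels against codec capabilities ... */
	}
}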
/**
* drm_eld_sad_count - Get ELD SAD count.
* @eld: pointer to an eld memory structure with sad_count set


@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */


@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
return atomic_read(&(page)->_mapcount) >= 0;
}
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met, implying that the system is under some pressure.
*/
static inline bool page_is_pfmemalloc(struct page *page)
{
/*
* Page index cannot be this large so this must be
* a pfmemalloc page.
*/
return page->index == -1UL;
}
/*
* Only to be called by the page allocator on a freshly allocated
* page.
*/
static inline void set_page_pfmemalloc(struct page *page)
{
page->index = -1UL;
}
static inline void clear_page_pfmemalloc(struct page *page)
{
page->index = 0;
}
/*
* Different kinds of faults, as returned by handle_mm_fault().
* Used to decide whether a process gets delivered SIGBUS or


@ -63,15 +63,6 @@ struct page {
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* sl[aou]b first free object */
bool pfmemalloc; /* If set by the page allocator,
* ALLOC_NO_WATERMARKS was set
* and the low watermark was not
* met implying that the system
* is under some pressure. The
* caller should try to ensure
* this page is only used to
* free other pages.
*/
};
union {


@ -1202,6 +1202,7 @@ struct msix_entry {
u16 entry; /* driver uses to specify entry, OS writes */
};
void pci_msi_setup_pci_dev(struct pci_dev *dev);
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);


@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/*
* Propagate page->pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page. If
* pfmemalloc is set, we check the mapping as a mapping implies
* page->index is set (index and pfmemalloc share space).
* If it's a valid mapping, we cannot use page->pfmemalloc but we
* do not lose pfmemalloc information as the pages would not be
* allocated using __GFP_MEMALLOC.
* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
*/
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);
page = compound_head(page);
if (page->pfmemalloc && !page->mapping)
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
static inline void skb_propagate_pfmemalloc(struct page *page,
struct sk_buff *skb)
{
if (page && page->pfmemalloc)
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}


@ -74,8 +74,6 @@ enum rc_filter_type {
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
* @idle: used to keep track of RX state
* @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
* wakeup protocols is the set of all raw encoders
* @allowed_protocols: bitmask with the supported RC_BIT_* protocols
* @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
* @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@ -136,7 +134,6 @@ struct rc_dev {
struct input_dev *input_dev;
enum rc_driver_type driver_type;
bool idle;
bool encode_wakeup;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;
@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
int ir_raw_event_store_with_filter(struct rc_dev *dev,
struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
int ir_raw_encode_scancode(u64 protocols,
const struct rc_scancode_filter *scancode,
struct ir_raw_event *events, unsigned int max);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{


@ -139,6 +139,7 @@ enum vb2_io_modes {
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
* @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
* @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@ -152,6 +153,7 @@ enum vb2_buffer_state {
VB2_BUF_STATE_PREPARING,
VB2_BUF_STATE_PREPARED,
VB2_BUF_STATE_QUEUED,
VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
VB2_BUF_STATE_DONE,
VB2_BUF_STATE_ERROR,


@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
int io_ops_count;
};
#ifdef CONFIG_SND_SOC_TOPOLOGY
/* gets a pointer to data from the firmware block header */
static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
{
@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
const struct snd_soc_tplg_widget_events *events, int num_events,
u16 event_type);
#else
static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
u32 index)
{
return 0;
}
#endif
#endif


@ -18,6 +18,12 @@
#include <linux/types.h>
#include <sound/asound.h>
#ifndef __KERNEL__
#error This API is an early revision and not enabled in the current
#error kernel release, it will be enabled in a future kernel version
#error with incompatible changes to what is here.
#endif
/*
* Maximum number of channels topology kcontrol can represent.
*/


@ -984,6 +984,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
return -ENOSYS;
}
/**
* irq_chip_set_type_parent - Set IRQ type on the parent interrupt
* @data: Pointer to interrupt specific data
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*
* Conditional, as the underlying parent chip might not implement it.
*/
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
data = data->parent_data;
if (data->chip->irq_set_type)
return data->chip->irq_set_type(data, type);
return -ENOSYS;
}
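
A usage sketch for a hierarchical irq_chip in a hypothetical driver (the parent-delegating mask/unmask helpers already exist alongside this one):

static struct irq_chip example_wakeup_chip = {
	.name		= "EXAMPLE-WAKEUP",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_retrigger	= irq_chip_retrigger_hierarchy,
	/* forward trigger-type requests to the parent chip, e.g. the GIC */
	.irq_set_type	= irq_chip_set_type_parent,
};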
/**
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
* @data: Pointer to interrupt specific data
@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
if (data->chip && data->chip->irq_retrigger)
return data->chip->irq_retrigger(data);
return -ENOSYS;
return 0;
}
/**


@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
spin_unlock(&base->lock);
base = new_base;
spin_lock(&base->lock);
timer->flags &= ~TIMER_BASEMASK;
timer->flags |= base->cpu;
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
}
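
The point of WRITE_ONCE() here is that timer->flags is also read without base->lock held; a conceptual sketch of the paired lockless reader (hypothetical helper name, not the actual lock_timer_base()):

for (;;) {
	u32 tf = READ_ONCE(timer->flags);		/* single, untorn load */
	struct tvec_base *base = example_base_for(tf);	/* hypothetical lookup */

	spin_lock_irqsave(&base->lock, flags);
	if (timer->flags == tf)
		break;			/* flags stable: we locked the right base */
	spin_unlock_irqrestore(&base->lock, flags);	/* timer migrated, retry */
}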


@ -1343,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
set_page_owner(page, order, gfp_flags);
/*
* page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
* steps that will free more memory. The caller should avoid the page
* being used for !PFMEMALLOC purposes.
*/
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
if (alloc_flags & ALLOC_NO_WATERMARKS)
set_page_pfmemalloc(page);
else
clear_page_pfmemalloc(page);
return 0;
}
@ -3345,7 +3348,7 @@ refill:
atomic_add(size - 1, &page->_count);
/* reset page count bias and offset to start of new frag */
nc->pfmemalloc = page->pfmemalloc;
nc->pfmemalloc = page_is_pfmemalloc(page);
nc->pagecnt_bias = size;
nc->offset = size;
}


@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
}
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
if (unlikely(page->pfmemalloc))
if (page_is_pfmemalloc(page))
pfmemalloc_active = true;
nr_pages = (1 << cachep->gfporder);
@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
__SetPageSlab(page);
if (page->pfmemalloc)
if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {


@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
start = page_address(page);


@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
*err = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
*err = 0;
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
fid->fid, (unsigned long long) offset,


@ -595,8 +595,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
addr, BATADV_PRINT_VID(vid)))
addr, BATADV_PRINT_VID(vid))) {
kfree(tt_local);
tt_local = NULL;
goto out;
}
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",


@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
break;
}
if (skb_trimmed)
if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
}
if (skb_trimmed)
if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;


@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
if (skb && frag_size) {
skb->head_frag = 1;
if (virt_to_head_page(data)->pfmemalloc)
if (page_is_pfmemalloc(virt_to_head_page(data)))
skb->pfmemalloc = 1;
}
return skb;
@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
* Otherwise returns the provided skb. Returns NULL in error cases
* (e.g. transport_len exceeds skb length or out-of-memory).
*
* Caller needs to set the skb transport header and release the returned skb.
* Provided skb is consumed.
* Caller needs to set the skb transport header and free any returned skb if it
* differs from the provided skb.
*/
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int transport_len)
@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int len = skb_transport_offset(skb) + transport_len;
int ret;
if (skb->len < len) {
kfree_skb(skb);
if (skb->len < len)
return NULL;
} else if (skb->len == len) {
else if (skb->len == len)
return skb;
}
skb_chk = skb_clone(skb, GFP_ATOMIC);
kfree_skb(skb);
if (!skb_chk)
return NULL;
@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
* If the skb has data beyond the given transport length, then a
* trimmed & cloned skb is checked and returned.
*
* Caller needs to set the skb transport header and release the returned skb.
* Provided skb is consumed.
* Caller needs to set the skb transport header and free any returned skb if it
* differs from the provided skb.
*/
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
skb_chk = skb_checksum_maybe_trim(skb, transport_len);
if (!skb_chk)
return NULL;
goto err;
if (!pskb_may_pull(skb_chk, offset)) {
kfree_skb(skb_chk);
return NULL;
}
if (!pskb_may_pull(skb_chk, offset))
goto err;
__skb_pull(skb_chk, offset);
ret = skb_chkf(skb_chk);
__skb_push(skb_chk, offset);
if (ret) {
kfree_skb(skb_chk);
return NULL;
}
if (ret)
goto err;
return skb_chk;
err:
if (skb_chk && skb_chk != skb)
kfree_skb(skb_chk);
return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);
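
The caller contract after this change, sketched (mirroring the bridge hunks above; the checksum callback name is illustrative):

struct sk_buff *skb_chk;

skb_chk = skb_checksum_trimmed(skb, transport_len, example_checksum_fn);
if (!skb_chk)
	return -EINVAL;	/* the original skb is untouched and still owned by the caller */

/* ... parse skb_chk ... */

if (skb_chk != skb)
	kfree_skb(skb_chk);	/* free only the trimmed clone, never the input skb */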
