Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

commit 25c43bf13b
Author: David S. Miller
Date:   2015-06-13 23:56:52 -07:00
38 changed files with 200 additions and 88 deletions


@@ -1481,6 +1481,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
By default, super page will be supported if Intel IOMMU
has the capability. With this option, super page will
not be supported.
+		ecs_off	[Default Off]
+			By default, extended context tables will be supported if
+			the hardware advertises that it has support both for the
+			extended tables themselves, and also PASID support. With
+			this option set, extended tables will not be used even
+			on hardware which claims to support them.
intel_idle.max_cstate= [KNL,HW,ACPI,X86]
0 disables intel_idle and fall back on acpi_idle.
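A worked example of the new option (hypothetical command line; where it is set is bootloader-specific): options to intel_iommu= are comma separated, matching the strcspn-based parser in the intel-iommu.c hunk further down, so disabling extended context tables while explicitly enabling the IOMMU looks like:

    intel_iommu=on,ecs_off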


@@ -20,7 +20,7 @@
files/UDP-Lite-HOWTO.txt
o The Wireshark UDP-Lite WiKi (with capture files):
-	http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
+	https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt


@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <asm/def_LPBlackfin.h>
#define __raw_readb bfin_read8
#define __raw_readw bfin_read16


@@ -175,10 +175,10 @@ ENTRY(__clear_user)
br r3
.section .fixup, "ax"
+99:
br r3
.previous
.section __ex_table, "a"
.align 2
-99:
.word 0b, 99b
.previous


@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
u64 entry, gentry, *spte;
int npte;
bool remote_flush, local_flush, zap_page;
-	union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
-		.cr0_wp = 1,
-		.cr4_pae = 1,
-		.nxe = 1,
-		.smep_andnot_wp = 1,
-		.smap_andnot_wp = 1,
-	};
+	union kvm_mmu_page_role mask = { };
+	mask.cr0_wp = 1;
+	mask.cr4_pae = 1;
+	mask.nxe = 1;
+	mask.smep_andnot_wp = 1;
+	mask.smap_andnot_wp = 1;
/*
* If we don't have indirect shadow pages, it means no page is
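A note on the shape of this change: a plausible motivation (my assumption, the diff itself does not say) is that older GCC releases reject designated initializers naming members of an anonymous struct inside a union, so the mask is zero-initialized and then filled in field by field. A standalone sketch of the two forms:

    /* sketch.c - illustration only, not kernel code */
    union role {
            unsigned int word;
            struct {                        /* anonymous struct member */
                    unsigned int cr0_wp:1;
                    unsigned int cr4_pae:1;
            };
    };

    int main(void)
    {
            /* Some old compilers reject:
             *   union role mask = { .cr0_wp = 1, .cr4_pae = 1 };
             */
            union role mask = { };          /* empty initializer is widely accepted */

            mask.cr0_wp = 1;                /* assign the bits one by one instead */
            mask.cr4_pae = 1;
            return mask.word ? 0 : 1;
    }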


@@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
return NOTIFY_OK;
}
+/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
blk_free_flush_queue(hctx->fq);
-	kfree(hctx->ctxs);
blk_mq_free_bitmap(&hctx->ctx_map);
}
@@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q)
unsigned int i;
/* hctx kobj stays in hctx */
-	queue_for_each_hw_ctx(q, hctx, i)
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!hctx)
+			continue;
+		kfree(hctx->ctxs);
kfree(hctx);
+	}
kfree(q->queue_hw_ctx);
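The rule the added comment states (free hctx->ctxs only in the queue's release handler) is the usual refcounted-object lifetime idiom: teardown paths drop references, and the release callback, which runs once the last reference is gone, does the actual kfree. A minimal sketch with hypothetical names:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct ctx_holder {                     /* hypothetical, not blk-mq's type */
            struct kref ref;
            void *ctxs;
    };

    static void ctx_holder_release(struct kref *ref)
    {
            struct ctx_holder *h = container_of(ref, struct ctx_holder, ref);

            kfree(h->ctxs);                 /* safe: no user can still see it */
            kfree(h);
    }

    static void ctx_holder_teardown(struct ctx_holder *h)
    {
            kref_put(&h->ref, ctx_holder_release);  /* never kfree directly */
    }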


@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
/* allocate ext devt */
idr_preload(GFP_KERNEL);
-	spin_lock(&ext_devt_lock);
+	spin_lock_bh(&ext_devt_lock);
idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-	spin_unlock(&ext_devt_lock);
+	spin_unlock_bh(&ext_devt_lock);
idr_preload_end();
if (idx < 0)
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
return;
if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
}
}
@@ -690,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
} else {
struct hd_struct *part;
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
if (part && get_disk(part_to_disk(part))) {
*partno = part->partno;
disk = part_to_disk(part);
}
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
}
return disk;
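The switch to the _bh lock variants only makes sense if ext_devt_lock can also be taken from bottom-half (softirq) context, for example from an RCU callback in the free path; that is my reading, not something the hunk states. The general rule, as a sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(shared_lock);    /* also taken from softirq context */

    void process_context_user(void)
    {
            spin_lock_bh(&shared_lock);     /* disables local BHs, so a softirq
                                             * cannot interrupt the holder and
                                             * re-take the lock on this CPU */
            /* ... touch shared state ... */
            spin_unlock_bh(&shared_lock);
    }

    void softirq_context_user(void)         /* e.g. a timer or RCU callback */
    {
            spin_lock(&shared_lock);        /* plain lock is enough here */
            /* ... */
            spin_unlock(&shared_lock);
    }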


@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
config BLK_DEV_PMEM
tristate "Persistent memory block device support"
+	depends on HAS_IOMEM
help
Saying Y here will allow you to use a contiguous range of reserved
memory as one or more persistent block devices.


@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
zram->max_comp_streams = 1;
set_capacity(zram->disk, 0);
+	part_stat_set_all(&zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */


@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
-	int i, reg_offset;
+	int i = 0, inc, try = 0, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = dev_priv->gpio_mmio_base;
retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
-	for (i = 0; i < num; i++) {
+	for (; i < num; i += inc) {
+		inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
-			i += 1; /* set i to the index of the read xfer */
+			inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
@@ -525,6 +527,18 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+	/*
+	 * Passive adapters sometimes NAK the first probe. Retry the first
+	 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+	 * has retries internally. See also the retry loop in
+	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+	 */
+	if (ret == -ENXIO && i == 0 && try++ == 0) {
+		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+			      adapter->name);
+		goto retry;
+	}
goto out;
timeout:
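The retry above is bounded by the one-shot try++ counter rather than a loop bound, so only the very first message ever gets a second attempt. The same shape, detached from the i915 specifics (do_transfer_once is hypothetical):

    #include <errno.h>

    int do_transfer_once(void);             /* hypothetical: 0 or -errno */

    int do_transfer(void)
    {
            int tries = 0;
            int ret;

    retry:
            ret = do_transfer_once();
            if (ret == -ENXIO && tries++ == 0)
                    goto retry;             /* a first NAK gets one more chance */
            return ret;
    }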


@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising analog device %d\n", device);
-	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+	intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;


@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-	/* if there is no audio, set MINM_OVER_MAXP */
-	if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
-		radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
-		    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
-		    (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
-		     drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+		    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}


@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+	/*
+	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
+	 * after the CP ring have chew one packet at least. Hence here we stop
+	 * and restart DPM after the radeon_ib_ring_tests().
+	 */
+	if (rdev->pm.dpm_enabled &&
+	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
+	    (rdev->family == CHIP_TURKS) &&
+	    (rdev->flags & RADEON_IS_MOBILITY)) {
+		mutex_lock(&rdev->pm.mutex);
+		radeon_dpm_disable(rdev);
+		radeon_dpm_enable(rdev);
+		mutex_unlock(&rdev->pm.mutex);
+	}
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);


@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* make sure object fit at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
-		return -EINVAL;
+		r = -EINVAL;
+		goto error_unreserve;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
-		return -EINVAL;
+		r = -EINVAL;
+		goto error_unreserve;
}
} else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
-			return -EINVAL;
+			r = -EINVAL;
+			goto error_unreserve;
}
}
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto error_unreserve;
}
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_vm_clear_bo(rdev, pt);
if (r) {
radeon_bo_unref(&pt);
-		radeon_bo_reserve(bo_va->bo, false);
return r;
}
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
return 0;
+error_unreserve:
+	radeon_bo_unreserve(bo_va->bo);
+	return r;
}
/**
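All the converted returns above now funnel through one error_unreserve label, the kernel's standard goto-unwind idiom: once a resource is held, every later failure exits through a label that releases it. A generic sketch (helpers hypothetical):

    #include <errno.h>
    #include <stdlib.h>

    int do_work(void *held);                /* hypothetical */

    int set_addr(void)
    {
            void *held = malloc(64);        /* stands in for the reservation */
            int r;

            if (!held)
                    return -ENOMEM;         /* nothing held yet: plain return */

            r = do_work(held);
            if (r)
                    goto error_unreserve;   /* held: take the single exit path */

            free(held);
            return 0;

    error_unreserve:
            free(held);
            return r;
    }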


@@ -150,6 +150,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
{ANY_BOARD_ID, 2961},
1024, 5112, 2024, 4832
},
+	{
+		(const char * const []){"LEN2000", NULL},
+		{ANY_BOARD_ID, ANY_BOARD_ID},
+		1024, 5113, 2021, 4832
+	},
{
(const char * const []){"LEN2001", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0045",
"LEN0047",
"LEN0049",
"LEN2000",
"LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
"LEN2003",


@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations (the ones with
+ * PASID support on bit 28) have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+				 ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable supported super page\n");
intel_iommu_superpage = 0;
+	} else if (!strncmp(str, "ecs_off", 7)) {
+		printk(KERN_INFO
+			"Intel-IOMMU: disable extended context table support\n");
+		intel_iommu_ecs = 0;
}
str += strcspn(str, ",");
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
-	if (ecap_ecs(iommu->ecap)) {
+	if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
@@ -806,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
if (context)
free_pgtable_page(context);
-		if (!ecap_ecs(iommu->ecap))
+		if (!ecs_enabled(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1141,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
-	if (ecap_ecs(iommu->ecap))
+	if (ecs_enabled(iommu))
addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);


@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
}
spin_unlock(&mddev->lock);
-		return err;
+		return err ?: len;
}
err = mddev_lock(mddev);
if (err)
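The err ?: len form above is GCC's conditional-with-omitted-operand extension: it yields err when err is non-zero (the -errno case) and len otherwise, evaluating err only once. In miniature:

    /* sysfs store hooks return -errno or the number of bytes consumed */
    long store_result(int err, long len)
    {
            return err ?: len;              /* same as: err ? err : len */
    }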
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-	flush_workqueue(md_misc_wq);
-	if (mddev->sync_thread) {
-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		if (mddev_lock(mddev) == 0) {
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+	    mddev_lock(mddev) == 0) {
+		flush_workqueue(md_misc_wq);
+		if (mddev->sync_thread) {
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
-			mddev_unlock(mddev);
}
+		mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);


@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);


@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,


@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_devcmd_fw_info *fw_info;
+	int err;
-	enic_dev_fw_info(enic, &fw_info);
+	err = enic_dev_fw_info(enic, &fw_info);
+	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+	 * For other failures, like devcmd failure, we return previously
+	 * recorded info.
+	 */
+	if (err == -ENOMEM)
+		return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
+	int err;
-	enic_dev_stats_dump(enic, &vstats);
+	err = enic_dev_stats_dump(enic, &vstats);
+	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+	 * For other failures, like devcmd failure, we return previously
+	 * recorded stats.
+	 */
+	if (err == -ENOMEM)
+		return;
for (i = 0; i < enic_n_tx_stats; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];


@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+	int err;
-	enic_dev_stats_dump(enic, &stats);
+	err = enic_dev_stats_dump(enic, &stats);
+	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+	 * For other failures, like devcmd failure, we return previously
+	 * recorded stats.
+	 */
+	if (err == -ENOMEM)
+		return net_stats;
net_stats->tx_packets = stats->tx.tx_frames_ok;
net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
+	enic_poll_unlock_napi(&enic->rq[rq]);
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
-	enic_poll_unlock_napi(&enic->rq[rq]);
return work_done;
}


@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
struct vnic_rq_buf *buf;
u32 fetch_index;
unsigned int count = rq->ring.desc_count;
+	int i;
buf = rq->to_clean;
-	while (vnic_rq_desc_used(rq) > 0) {
+	for (i = 0; i < rq->ring.desc_count; i++) {
(*buf_clean)(rq, buf);
-		buf = rq->to_clean = buf->next;
-		rq->ring.desc_avail++;
+		buf = buf->next;
}
+	rq->ring.desc_avail = rq->ring.desc_count - 1;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);


@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
-		wr32(trgttiml, rq->perout.start.sec);
-		wr32(trgttimh, rq->perout.start.nsec);
+		wr32(trgttimh, rq->perout.start.sec);
+		wr32(trgttiml, rq->perout.start.nsec);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
} else {


@@ -115,13 +115,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
+#define ecap_pasid(e)	((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
#define ecap_nwfs(e) ((e >> 33) & 0x1)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_pasid(e)	((e >> 28) & 0x1)
+/* PASID support used to be on bit 28 */
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)


@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
}
for (; vma; vma = vma->vm_next) {
if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-		    is_vm_hugetlb_page(vma)) {
+		    is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
continue;
}


@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
if (producer_fifo >= 0) {
struct sched_param param = {
-			.sched_priority = consumer_fifo
+			.sched_priority = producer_fifo
};
sched_setscheduler(producer, SCHED_FIFO, &param);
} else


@@ -2323,6 +2323,8 @@ done_restock:
css_get_many(&memcg->css, batch);
if (batch > nr_pages)
refill_stock(memcg, batch - nr_pages);
+	if (!(gfp_mask & __GFP_WAIT))
+		goto done;
/*
* If the hierarchy is above the normal consumption range,
* make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, 1);
-	/* XXX: caller holds IRQ-safe mapping->tree_lock */
-	VM_BUG_ON(!irqs_disabled());
+	/* Caller disabled preemption with mapping->tree_lock */
mem_cgroup_charge_statistics(memcg, page, -1);
memcg_check_events(memcg, page);
}


@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
* wait_table may be allocated from boot memory,
* here only free if it's allocated by vmalloc.
*/
-		if (is_vmalloc_addr(zone->wait_table))
+		if (is_vmalloc_addr(zone->wait_table)) {
vfree(zone->wait_table);
+			zone->wait_table = NULL;
+		}
}
}
EXPORT_SYMBOL(try_offline_node);


@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
static void destroy_handle_cache(struct zs_pool *pool)
{
-	kmem_cache_destroy(pool->handle_cachep);
+	if (pool->handle_cachep)
+		kmem_cache_destroy(pool->handle_cachep);
}
static unsigned long alloc_handle(struct zs_pool *pool)


@@ -1164,6 +1164,9 @@ static void br_multicast_add_router(struct net_bridge *br,
struct net_bridge_port *p;
struct hlist_node *slot = NULL;
+	if (!hlist_unhashed(&port->rlist))
+		return;
hlist_for_each_entry(p, &br->router_list, rlist) {
if ((unsigned long) port >= (unsigned long) p)
break;
@@ -1191,12 +1194,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
if (port->multicast_router != 1)
return;
-	if (!hlist_unhashed(&port->rlist))
-		goto timer;
br_multicast_add_router(br, port);
-timer:
mod_timer(&port->multicast_router_timer,
now + br->multicast_querier_interval);
}


@ -4467,7 +4467,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
while (order) {
if (npages >= 1 << order) {
-			page = alloc_pages(gfp_mask |
+			page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
__GFP_COMP |
__GFP_NOWARN |
__GFP_NORETRY,


@@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk)
/*
* SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
-	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
-	 * it has rmem allocations there is a risk that the user of the
-	 * socket cannot make forward progress due to exceeding the rmem
-	 * limits. By rights, sk_clear_memalloc() should only be called
-	 * on sockets being torn down but warn and reset the accounting if
-	 * that assumption breaks.
+	 * progress of swapping. SOCK_MEMALLOC may be cleared while
+	 * it has rmem allocations due to the last swapfile being deactivated
+	 * but there is a risk that the socket is unusable due to exceeding
+	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
*/
-	if (WARN_ON(sk->sk_forward_alloc))
-		sk_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
@@ -1872,7 +1869,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
pfrag->offset = 0;
if (SKB_FRAG_PAGE_ORDER) {
-		pfrag->page = alloc_pages(gfp | __GFP_COMP |
+		pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
__GFP_NOWARN | __GFP_NORETRY,
SKB_FRAG_PAGE_ORDER);
if (likely(pfrag->page)) {
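Both this hunk and the alloc_skb_with_frags one above mask __GFP_WAIT out of the high-order attempt, so the allocator may only sleep and reclaim for the final order-0 fallback. The overall shape, as a sketch (flag names as in this 4.1-era tree; __GFP_WAIT was later replaced):

    #include <linux/gfp.h>

    static struct page *alloc_frag_page(gfp_t gfp, unsigned int order)
    {
            struct page *page;

            for (; order > 0; order--) {
                    /* opportunistic: never block for a compound page */
                    page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
                                       __GFP_NOWARN | __GFP_NORETRY, order);
                    if (page)
                            return page;
            }
            /* only the order-0 attempt keeps the caller's blocking policy */
            return alloc_pages(gfp, 0);
    }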


@@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
*/
rcu_read_lock();
+resubmit:
idev = ip6_dst_idev(skb_dst(skb));
if (!pskb_pull(skb, skb_transport_offset(skb)))
goto discard;
nhoff = IP6CB(skb)->nhoff;
nexthdr = skb_network_header(skb)[nhoff];
-resubmit:
raw = raw6_local_deliver(skb, nexthdr);
ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot) {
@@ -246,12 +246,10 @@ resubmit:
goto discard;
ret = ipprot->handler(skb);
-		if (ret < 0) {
-			nexthdr = -ret;
+		if (ret > 0)
goto resubmit;
-		} else if (ret == 0) {
+		else if (ret == 0)
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-		}
} else {
if (!raw) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {


@@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
case NETDEV_UNREGISTER:
mpls_ifdown(dev);
break;
+	case NETDEV_CHANGENAME:
+		mdev = mpls_dev_get(dev);
+		if (mdev) {
+			int err;
+			mpls_dev_sysctl_unregister(mdev);
+			err = mpls_dev_sysctl_register(dev, mdev);
+			if (err)
+				return notifier_from_errno(err);
+		}
+		break;
}
return NOTIFY_OK;
}


@@ -381,13 +381,14 @@ nomem:
}
-/* Public interface to creat the association shared key.
+/* Public interface to create the association shared key.
* See code above for the algorithm.
*/
int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
{
struct sctp_auth_bytes *secret;
struct sctp_shared_key *ep_key;
+	struct sctp_chunk *chunk;
/* If we don't support AUTH, or peer is not capable
* we don't need to do anything.
@@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
sctp_auth_key_put(asoc->asoc_shared_key);
asoc->asoc_shared_key = secret;
+	/* Update send queue in case any chunk already in there now
+	 * needs authenticating
+	 */
+	list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) {
+		if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc))
+			chunk->auth = 1;
+	}
return 0;
}


@@ -2140,11 +2140,17 @@ static void tipc_sk_timeout(unsigned long data)
peer_node = tsk_peer_node(tsk);
if (tsk->probing_state == TIPC_CONN_PROBING) {
/* Previous probe not answered -> self abort */
-		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
-				      TIPC_CONN_MSG, SHORT_H_SIZE, 0,
-				      own_node, peer_node, tsk->portid,
-				      peer_port, TIPC_ERR_NO_PORT);
+		if (!sock_owned_by_user(sk)) {
+			sk->sk_socket->state = SS_DISCONNECTING;
+			tsk->connected = 0;
+			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+					      tsk_peer_port(tsk));
+			sk->sk_state_change(sk);
+		} else {
+			/* Try again later */
+			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
+		}
} else {
skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
INT_H_SIZE, 0, peer_node, own_node,


@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
wdev_unlock(wdev);
+	memset(&sinfo, 0, sizeof(sinfo));
if (rdev_get_station(rdev, dev, bssid, &sinfo))
return NULL;


@@ -3169,12 +3169,12 @@ sub process {
}
# check for global initialisers.
-		if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+		if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) {
if (ERROR("GLOBAL_INITIALISERS",
"do not initialise globals to 0 or NULL\n" .
$herecurr) &&
$fix) {
-				$fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+				$fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/;
}
}
# check for static initialisers.
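For reference, the kind of file-scope declarations the tightened GLOBAL_INITIALISERS regex is meant to catch (illustrative C, not lines from the tree; statics are handled by the separate check that follows):

    #include <stdbool.h>

    int enabled = 0;        /* flagged: do not initialise globals to 0 */
    char *name = NULL;      /* flagged: do not initialise globals to NULL */
    bool ready = false;     /* flagged: do not initialise globals to false */
    int threshold = 42;     /* not flagged: non-zero initialiser */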