Merge branch 'linus' into timers/core

Resolve semantic conflict in kernel/time/timekeeping.c.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2012-07-18 11:24:41 +02:00
Commit: eec19d1a0d

66 changed files with 462 additions and 271 deletions

@ -1626,3 +1626,5 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11 1587
MX6Q_PAD_SD2_DAT3__GPIO_1_12 1588
MX6Q_PAD_SD2_DAT3__SJC_DONE 1589
MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 1590
MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID 1591
MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID 1592


@ -3433,13 +3433,14 @@ S: Supported
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Sergey Lapin <slapin@ossfans.org>
L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
F: drivers/ieee802154/
IIO SUBSYSTEM AND DRIVERS


@ -1091,7 +1091,7 @@ error:
while (--i)
if (pages[i])
__free_pages(pages[i], 0);
if (array_size < PAGE_SIZE)
if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
for (i = 0; i < count; i++)
if (pages[i])
__free_pages(pages[i], 0);
if (array_size < PAGE_SIZE)
if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
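
For context, a minimal sketch of the invariant both hunks restore, with hypothetical helper names: the allocation side (not shown in the hunk) uses kzalloc() for array sizes up to and including PAGE_SIZE and vzalloc() above that, so the free side must use the identical '<=' test; with the old strict '<', an array of exactly PAGE_SIZE bytes was kzalloc'd but handed to vfree().

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helpers, not the driver's own: the two size tests must
 * match exactly so every pointer is freed by the allocator family that
 * produced it. */
static void *page_array_alloc(size_t array_size)
{
	if (array_size <= PAGE_SIZE)
		return kzalloc(array_size, GFP_KERNEL);
	return vzalloc(array_size);
}

static void page_array_free(void *array, size_t array_size)
{
	if (array_size <= PAGE_SIZE)	/* same test as the allocator */
		kfree(array);
	else
		vfree(array);
}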


@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
static int cdv_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (cdv_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
val *= lbpc;
}
return val;
}
static u32 cdv_get_max_backlight(struct drm_device *dev)
{
u32 max = REG_READ(BLC_PWM_CTL);
@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
return max;
}
static int cdv_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (cdv_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
val *= lbpc;
}
return (val * 100)/cdv_get_max_backlight(dev);
}
static int cdv_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
if (level < 1)
level = 1;
level *= cdv_get_max_backlight(dev);
level /= 100;
if (cdv_backlight_combination_mode(dev)) {
u32 max = cdv_get_max_backlight(dev);
u8 lbpc;
@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
cdv_backlight_device->props.brightness =
cdv_get_brightness(cdv_backlight_device);
cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
return 0;
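
A hedged sketch of the scaling contract these brightness hunks introduce (helper names are mine): userspace sees a 0-100 range, the PWM register works in raw duty-cycle units, and get/set must be inverse mappings through the same maximum, which is why cdv_get_brightness() now returns (val * 100) / max and cdv_set_brightness() multiplies by max and divides by 100.

#include <linux/types.h>

/* illustrative inverse conversions between the 0-100 user range and
 * the raw PWM duty cycle */
static inline u32 duty_to_percent(u32 duty, u32 max_duty)
{
	return (duty * 100) / max_duty;
}

static inline u32 percent_to_duty(u32 percent, u32 max_duty)
{
	return (percent * max_duty) / 100;
}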


@ -144,6 +144,8 @@ struct opregion_asle {
#define ASLE_CBLV_VALID (1<<31)
static struct psb_intel_opregion *system_opregion;
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
if (asle && system_opregion ) {
/* Don't do this on Medfield or other non PC like devices, they
use the bit for something different altogether */
psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
static struct psb_intel_opregion *system_opregion;
static int psb_intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
system_opregion = opregion;
register_acpi_notifier(&psb_intel_opregion_notifier);
}
if (opregion->asle)
psb_intel_opregion_enable_asle(dev);
}
void psb_intel_opregion_fini(struct drm_device *dev)


@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
extern void psb_intel_opregion_init(struct drm_device *dev);
extern void psb_intel_opregion_fini(struct drm_device *dev);
extern int psb_intel_opregion_setup(struct drm_device *dev);
extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
#else
@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
{
return 0;
}
extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
{
}
#endif


@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
psb_backlight_device->props.max_brightness = 100;
backlight_update_status(psb_backlight_device);
dev_priv->backlight_device = psb_backlight_device;
/* This must occur after the backlight is properly initialised */
psb_lid_timer_init(dev_priv);
return 0;
}
@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
return 0;
}
/* Not exactly an erratum more an irritation */
static void psb_chip_errata(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
psb_lid_timer_init(dev_priv);
}
static void psb_chip_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
.errata = psb_chip_errata,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,


@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
return ret;
psb_intel_opregion_enable_asle(dev);
#if 0
/*enable runtime pm at last*/
pm_runtime_enable(&dev->pdev->dev);


@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
skb->truesize += PAGE_SIZE;
} else
skb_put(skb, length);
@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
int tailroom;
u64 *mapping;
if (ipoib_ud_need_sg(priv->max_ib_mtu))
if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
buf_size = IPOIB_UD_HEAD_SIZE;
else
tailroom = 128; /* reserve some tailroom for IP/TCP headers */
} else {
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
tailroom = 0;
}
skb = dev_alloc_skb(buf_size + 4);
skb = dev_alloc_skb(buf_size + tailroom + 4);
if (unlikely(!skb))
return NULL;


@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s ch%d mgr prim(%x) addr(%x) err %d\n",
__func__, ch->nr, hh->prim, ch->addr, ret);
"%s mgr prim(%x) err %d\n",
__func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);


@ -6,7 +6,7 @@
#include "bonding.h"
#include "bond_alb.h"
#ifdef CONFIG_DEBUG_FS
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
#include <linux/debugfs.h>
#include <linux/seq_file.h>


@ -3227,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
case NETDEV_UNREGISTER:
bond_remove_proc_entry(event_bond);
break;
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
break;
default:
break;
}
@ -4411,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
bond_remove_proc_entry(bond);
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
@ -4814,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);


@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
netif_carrier_off(netdev);
netif_stop_queue(netdev);
hw->hibernate = true;
if (atl1c_reset_mac(hw) != 0)
if (netif_msg_hw(adapter))


@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;


@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
j++;
j = NEXT_TX_BD(j);
continue;
}
@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
j++;
for (k = 0; k < last; k++, j++) {
j = NEXT_TX_BD(j);
for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
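
For context, a sketch of why the plain j++ was wrong, under my reading of the ring layout (the constants below are assumed, not quoted from bnx2.h): the last buffer descriptor in each descriptor page is a chain entry pointing at the next page, so index arithmetic must hop over it with NEXT_TX_BD() rather than a bare increment.

#define TX_DESC_CNT	256			/* BDs per page (assumed) */
#define MAX_TX_DESC_CNT	(TX_DESC_CNT - 1)	/* last BD chains pages */

/* advance a ring index, skipping the chain BD at each page boundary */
#define NEXT_TX_BD(x)							\
	((((x) & (MAX_TX_DESC_CNT - 1)) == (MAX_TX_DESC_CNT - 1)) ?	\
	 (x) + 2 : (x) + 1)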


@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
pr_warn("%s: Failed waiting for ref count to go to zero\n",
__func__);
return 0;
out_unlock:
@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo = &udev->cnic_uinfo;
uinfo->mem[0].addr = dev->netdev->base_addr;
uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
uinfo->mem[0].internal_addr = dev->regview;
uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);


@ -2063,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Steal sock reference for processing TX time stamps */
swap(skb_new->sk, skb->sk);
swap(skb_new->destructor, skb->destructor);
kfree_skb(skb);
if (skb->sk)
skb_set_owner_w(skb_new, skb->sk);
consume_skb(skb);
skb = skb_new;
}
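
A small sketch of the ownership idiom the hunk switches to (wrapper name is mine): rather than hand-swapping sk and destructor between the two buffers, the reallocated skb is charged to the sending socket with skb_set_owner_w(), which also installs the right destructor, and the original is released with consume_skb() because this path is not a drop.

#include <linux/skbuff.h>
#include <net/sock.h>

/* illustrative: transfer socket accounting to a reallocated TX skb */
static void adopt_tx_skb(struct sk_buff *skb_new, struct sk_buff *skb_old)
{
	if (skb_old->sk)	/* charges sk_wmem_alloc, sets sock_wfree */
		skb_set_owner_w(skb_new, skb_old->sk);
	consume_skb(skb_old);	/* normal free, not an error drop */
}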


@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
/* SYNCH bit and IV bit are sticky */
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {


@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
**/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
u16 phy_reg;
u32 phy_id;
u16 phy_reg = 0;
u32 phy_id = 0;
s32 ret_val;
u16 retry_count;
e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
phy_id = (u32)(phy_reg << 16);
e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
for (retry_count = 0; retry_count < 2; retry_count++) {
ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF))
continue;
phy_id = (u32)(phy_reg << 16);
ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF)) {
phy_id = 0;
continue;
}
phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
break;
}
if (hw->phy.id) {
if (hw->phy.id == phy_id)
return true;
} else {
if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
hw->phy.id = phy_id;
} else if (phy_id) {
hw->phy.id = phy_id;
hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
return true;
}
return false;
/*
* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
hw->phy.ops.release(hw);
ret_val = e1000_set_mdio_slow_mode_hv(hw);
if (!ret_val)
ret_val = e1000e_get_phy_id(hw);
hw->phy.ops.acquire(hw);
return !ret_val;
}
/**


@ -6647,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
e_err(drv, "Enable failed, SR-IOV enabled\n");
return -EINVAL;
}
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&


@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
tx_ring = &(adapter->tx_ring[r_idx]);
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
ixgbevf_clean_tx_irq(adapter, tx_ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbevf_ring *rx_ring;
int r_idx;
int i;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = &(adapter->rx_ring[r_idx]);
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
}
if (!q_vector->rxr_count)
return IRQ_HANDLED;


@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
csum);
wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {

View File

@ -1212,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->desc->set_tx_owner(desc);
wmb();
}
/* Interrupt on completition only for the latest segment */
@ -1227,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* To avoid raise condition */
priv->hw->desc->set_tx_owner(first);
wmb();
priv->cur_tx++;
@ -1290,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
wmb();
priv->hw->desc->set_rx_owner(p + entry);
wmb();
}
}
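
All three hunks apply one ordering rule, sketched below with a made-up descriptor layout: every store the DMA engine must observe has to be globally visible before the store that flips ownership, so a wmb() sits between preparing the descriptor and set_tx_owner()/set_rx_owner() rather than before the preparation.

#include <linux/types.h>
#include <asm/barrier.h>

struct desc { u32 buf, len, own_ctrl; };	/* illustrative */
#define DESC_OWN_BIT	(1u << 31)

static void hand_desc_to_dma(struct desc *d, u32 buf, u32 len)
{
	d->buf = buf;
	d->len = len;
	wmb();				/* payload fields first ... */
	d->own_ctrl |= DESC_OWN_BIT;	/* ... then ownership */
}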


@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
mutex_lock(&pb->mii_bus->mdio_lock);
/* In theory multiple mdio_mux could be stacked, thus creating
* more than a single level of nesting. But in practice,
* SINGLE_DEPTH_NESTING will cover the vast majority of use
* cases. We use it, instead of trying to handle the general
* case.
*/
mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
mutex_lock(&pb->mii_bus->mdio_lock);
mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
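
A condensed sketch of the lockdep annotation both hunks add (struct name is mine): the muxed access takes the parent bus's mdio_lock while a lock of the same lock class may already be held further up the call chain, so the parent acquisition is declared one nesting level deep; plain mutex_lock() would make lockdep report a false self-deadlock between the two instances.

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct bus_like { struct mutex mdio_lock; };	/* illustrative */

static void access_via_parent(struct bus_like *parent)
{
	/* same class as the child's lock, but a distinct instance */
	mutex_lock_nested(&parent->mdio_lock, SINGLE_DEPTH_NESTING);
	/* ... select the child bus and forward the register access ... */
	mutex_unlock(&parent->mdio_lock);
}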


@ -346,6 +346,15 @@ static const struct driver_info qmi_wwan_force_int1 = {
.data = BIT(1), /* interface whitelist bitmap */
};
static const struct driver_info qmi_wwan_force_int2 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
.unbind = qmi_wwan_unbind_shared,
.manage_power = qmi_wwan_manage_power,
.data = BIT(2), /* interface whitelist bitmap */
};
static const struct driver_info qmi_wwan_force_int3 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
@ -498,6 +507,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
{ /* ZTE MF60 */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
.idProduct = 0x1402,
.bInterfaceClass = 0xff,
.bInterfaceSubClass = 0xff,
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int2,
},
{ /* Sierra Wireless MC77xx in QMI mode */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x1199,


@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;


@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
return 0;
}
if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
key_flags);
spin_unlock_irqrestore(&il->sta_lock, flags);
@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
il->stations[sta_id].sta.key.key_flags =
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;


@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
return;
/* monitor and check for other stuck queues */
if (il_is_any_associated(il)) {
for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
/* skip as we already checked the command queue */
if (cnt == il->cmd_queue)
continue;
if (il_check_stuck_queue(il, cnt))
return;
}
for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
/* skip as we already checked the command queue */
if (cnt == il->cmd_queue)
continue;
if (il_check_stuck_queue(il, cnt))
return;
}
mod_timer(&il->watchdog,


@ -958,6 +958,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
/* firmware doesn't support this type of hidden SSID */
default:
kfree(bss_cfg);
return -EINVAL;
}


@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
case QID_RX:
if (!rt2x00queue_full(queue))
rt2x00queue_for_each_entry(queue,
Q_INDEX_DONE,
Q_INDEX,
Q_INDEX_DONE,
NULL,
rt2x00usb_kick_rx_entry);
break;


@ -474,7 +474,9 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
grp->configs[j] = config & ~IMX_PAD_SION;
}
#ifdef DEBUG
IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
#endif
return 0;
}


@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
};
/* Pad names for the pinmux subsystem */


@ -694,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
int ret, i;
unsigned long cfg;
int cfg;
struct ideapad_private *priv;
if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
if (read_method_int(adevice->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@ -721,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;


@ -72,6 +72,7 @@
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
MODULE_DEVICE_TABLE(pci, ips_id_table);
static int ips_blacklist_callback(const struct dmi_system_id *id)
{
pr_info("Blacklisted intel_ips for %s\n", id->ident);
return 1;
}
static const struct dmi_system_id ips_blacklist[] = {
{
.callback = ips_blacklist_callback,
.ident = "HP ProBook",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
},
},
{ } /* terminating entry */
};
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u64 platform_info;
@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
u16 htshi, trc, trc_required_mask;
u8 tse;
if (dmi_check_system(ips_blacklist))
return -ENODEV;
ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
if (!ips)
return -ENOMEM;


@ -973,7 +973,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
unsigned long value = 0;
int value;
int ret = 0;
struct sony_nc_value *item =
container_of(attr, struct sony_nc_value, devattr);
@ -984,7 +984,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
if (count > 31)
return -EINVAL;
if (kstrtoul(buffer, 10, &value))
if (kstrtoint(buffer, 10, &value))
return -EINVAL;
if (item->validate)
@ -994,7 +994,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
return value;
ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
(int *)&value, NULL);
&value, NULL);
if (ret < 0)
return -EIO;
@ -1010,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
struct sony_backlight_props {
struct backlight_device *dev;
int handle;
int cmd_base;
u8 offset;
u8 maxlvl;
};
@ -1037,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
sony_call_snc_handle(sdev->handle, 0x0200, &result);
sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
return (result & 0xff) - sdev->offset;
}
@ -1049,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
(struct sony_backlight_props *)bl_get_data(bd);
value = bd->props.brightness + sdev->offset;
if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
&result))
return -EIO;
return value;
@ -1172,6 +1174,11 @@ static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
/*
* ACPI callbacks
*/
enum event_types {
HOTKEY = 1,
KILLSWITCH,
GFX_SWITCH
};
static void sony_nc_notify(struct acpi_device *device, u32 event)
{
u32 real_ev = event;
@ -1196,7 +1203,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
/* hotkey event */
case 0x0100:
case 0x0127:
ev_type = 1;
ev_type = HOTKEY;
real_ev = sony_nc_hotkeys_decode(event, handle);
if (real_ev > 0)
@ -1216,7 +1223,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
* update the rfkill device status when the
* switch is moved.
*/
ev_type = 2;
ev_type = KILLSWITCH;
sony_call_snc_handle(handle, 0x0100, &result);
real_ev = result & 0x03;
@ -1226,6 +1233,24 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
break;
case 0x0128:
case 0x0146:
/* Hybrid GFX switching */
sony_call_snc_handle(handle, 0x0000, &result);
dprintk("GFX switch event received (reason: %s)\n",
(result & 0x01) ?
"switch change" : "unknown");
/* verify the switch state
* 1: discrete GFX
* 0: integrated GFX
*/
sony_call_snc_handle(handle, 0x0100, &result);
ev_type = GFX_SWITCH;
real_ev = result & 0xff;
break;
default:
dprintk("Unknown event 0x%x for handle 0x%x\n",
event, handle);
@ -1238,7 +1263,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
} else {
/* old style event */
ev_type = 1;
ev_type = HOTKEY;
sony_laptop_report_input_event(real_ev);
}
@ -1893,32 +1918,33 @@ static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
* bits 4,5: store the limit into the EC
* bits 6,7: store the limit into the battery
*/
cmd = 0;
/*
* handle 0x0115 should allow storing on battery too;
* handle 0x0136 same as 0x0115 + health status;
* handle 0x013f, same as 0x0136 but no storing on the battery
*
* Store only inside the EC for now, regardless the handle number
*/
if (value == 0)
/* disable limits */
cmd = 0x0;
if (value > 0) {
if (value <= 50)
cmd = 0x20;
else if (value <= 50)
cmd = 0x21;
else if (value <= 80)
cmd = 0x10;
else if (value <= 80)
cmd = 0x11;
else if (value <= 100)
cmd = 0x30;
else if (value <= 100)
cmd = 0x31;
else
return -EINVAL;
else
return -EINVAL;
/*
* handle 0x0115 should allow storing on battery too;
* handle 0x0136 same as 0x0115 + health status;
* handle 0x013f, same as 0x0136 but no storing on the battery
*/
if (bcare_ctl->handle != 0x013f)
cmd = cmd | (cmd << 2);
if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
&result))
cmd = (cmd | 0x1) << 0x10;
}
if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
return -EIO;
return count;
@ -2113,7 +2139,7 @@ static ssize_t sony_nc_thermal_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
ssize_t count = 0;
unsigned int mode = sony_nc_thermal_mode_get();
int mode = sony_nc_thermal_mode_get();
if (mode < 0)
return mode;
@ -2472,6 +2498,7 @@ static void sony_nc_backlight_ng_read_limits(int handle,
{
u64 offset;
int i;
int lvl_table_len = 0;
u8 min = 0xff, max = 0x00;
unsigned char buffer[32] = { 0 };
@ -2480,8 +2507,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
props->maxlvl = 0xff;
offset = sony_find_snc_handle(handle);
if (offset < 0)
return;
/* try to read the boundaries from ACPI tables, if we fail the above
* defaults should be reasonable
@ -2491,11 +2516,21 @@ static void sony_nc_backlight_ng_read_limits(int handle,
if (i < 0)
return;
switch (handle) {
case 0x012f:
case 0x0137:
lvl_table_len = 9;
break;
case 0x143:
lvl_table_len = 16;
break;
}
/* the buffer lists brightness levels available, brightness levels are
* from position 0 to 8 in the array, other values are used by ALS
* control.
*/
for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
dprintk("Brightness level: %d\n", buffer[i]);
@ -2520,16 +2555,24 @@ static void sony_nc_backlight_setup(void)
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
if (sony_find_snc_handle(0x12f) != -1) {
if (sony_find_snc_handle(0x12f) >= 0) {
ops = &sony_backlight_ng_ops;
sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
} else if (sony_find_snc_handle(0x137) != -1) {
} else if (sony_find_snc_handle(0x137) >= 0) {
ops = &sony_backlight_ng_ops;
sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
} else if (sony_find_snc_handle(0x143) >= 0) {
ops = &sony_backlight_ng_ops;
sony_bl_props.cmd_base = 0x3000;
sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
ops = &sony_backlight_ops;
@ -2597,6 +2640,12 @@ static int sony_nc_add(struct acpi_device *device)
}
}
result = sony_laptop_setup_input(device);
if (result) {
pr_err("Unable to create input devices\n");
goto outplatform;
}
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
&handle))) {
int arg = 1;
@ -2614,12 +2663,6 @@ static int sony_nc_add(struct acpi_device *device)
}
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
pr_err("Unable to create input devices\n");
goto outsnc;
}
if (acpi_video_backlight_support()) {
pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
@ -2667,22 +2710,21 @@ static int sony_nc_add(struct acpi_device *device)
return 0;
out_sysfs:
out_sysfs:
for (item = sony_nc_values; item->name; ++item) {
device_remove_file(&sony_pf_device->dev, &item->devattr);
}
sony_nc_backlight_cleanup();
sony_laptop_remove_input();
outsnc:
sony_nc_function_cleanup(sony_pf_device);
sony_nc_handles_cleanup(sony_pf_device);
outpresent:
outplatform:
sony_laptop_remove_input();
outpresent:
sony_pf_remove();
outwalk:
outwalk:
sony_nc_rfkill_cleanup();
return result;
}


@ -1085,7 +1085,7 @@ static int __init rpmsg_init(void)
return ret;
}
module_init(rpmsg_init);
subsys_initcall(rpmsg_init);
static void __exit rpmsg_fini(void)
{


@ -14,7 +14,7 @@
#include <linux/sched.h>
#include <linux/pipe_fs_i.h>
static void wait_for_partner(struct inode* inode, unsigned int *cnt)
static int wait_for_partner(struct inode* inode, unsigned int *cnt)
{
int cur = *cnt;
@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
if (signal_pending(current))
break;
}
return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct inode* inode)
@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
wait_for_partner(inode, &pipe->w_counter);
if(signal_pending(current))
if (wait_for_partner(inode, &pipe->w_counter))
goto err_rd;
}
}
@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
wake_up_partner(inode);
if (!pipe->readers) {
wait_for_partner(inode, &pipe->r_counter);
if (signal_pending(current))
if (wait_for_partner(inode, &pipe->r_counter))
goto err_wr;
}
break;
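
The same pattern in condensed, hedged form (a generic wait queue, not the pipe internals): the helper now encodes "interrupted before the partner arrived" in its return value, so both call sites collapse to a single if() instead of a wait followed by a separate signal_pending() test.

#include <linux/wait.h>

/* illustrative: report a signal as -ERESTARTSYS from the helper */
static int wait_for_flag(wait_queue_head_t *wq, int *flag)
{
	/* returns 0 once *flag is set, -ERESTARTSYS on a signal */
	return wait_event_interruptible(*wq, *flag);
}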


@ -1074,13 +1074,13 @@ restart:
* If we couldn't get anything, give up.
*/
if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
if (!forced++) {
trace_xfs_alloc_near_busy(args);
xfs_log_force(args->mp, XFS_LOG_SYNC);
goto restart;
}
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
return 0;
@ -2434,13 +2434,22 @@ xfs_alloc_vextent_worker(
current_restore_flags_nested(&pflags, PF_FSTRANS);
}
int /* error */
/*
* Data allocation requests often come in with little stack to work on. Push
* them off to a worker thread so there is lots of stack to use. Metadata
* requests, OTOH, are generally from low stack usage paths, so avoid the
* context switch overhead here.
*/
int
xfs_alloc_vextent(
xfs_alloc_arg_t *args) /* allocation argument structure */
struct xfs_alloc_arg *args)
{
DECLARE_COMPLETION_ONSTACK(done);
if (!args->userdata)
return __xfs_alloc_vextent(args);
args->done = &done;
INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
queue_work(xfs_alloc_wq, &args->work);
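
A generic sketch of the idiom the new comment describes (names are mine, not xfs's): queue the stack-hungry work to a kworker, which runs on a fresh stack, and park the caller on an on-stack completion until the result is ready.

#include <linux/workqueue.h>
#include <linux/completion.h>

struct offload {
	struct work_struct work;
	struct completion *done;
	int result;
};

static void offload_fn(struct work_struct *work)
{
	struct offload *o = container_of(work, struct offload, work);

	o->result = 0;		/* the deep-stack work runs here */
	complete(o->done);
}

static int run_with_fresh_stack(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct offload o = { .done = &done };

	INIT_WORK_ONSTACK(&o.work, offload_fn);
	schedule_work(&o.work);
	wait_for_completion(&done);
	return o.result;
}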


@ -989,27 +989,6 @@ xfs_buf_ioerror_alert(
(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}
int
xfs_bwrite(
struct xfs_buf *bp)
{
int error;
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
xfs_bdstrat_cb(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
}
return error;
}
/*
* Called when we want to stop a buffer from getting written or read.
* We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@ -1079,14 +1058,7 @@ xfs_bioerror_relse(
return EIO;
}
/*
* All xfs metadata buffers except log state machine buffers
* get this attached as their b_bdstrat callback function.
* This is so that we can catch a buffer
* after prematurely unpinning it to forcibly shutdown the filesystem.
*/
int
STATIC int
xfs_bdstrat_cb(
struct xfs_buf *bp)
{
@ -1107,6 +1079,27 @@ xfs_bdstrat_cb(
return 0;
}
int
xfs_bwrite(
struct xfs_buf *bp)
{
int error;
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
xfs_bdstrat_cb(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
}
return error;
}
/*
* Wrapper around bdstrat so that we can stop data from going to disk in case
* we are shutting down the filesystem. Typically user data goes thru this
@ -1243,7 +1236,7 @@ xfs_buf_iorequest(
*/
atomic_set(&bp->b_io_remaining, 1);
_xfs_buf_ioapply(bp);
_xfs_buf_ioend(bp, 0);
_xfs_buf_ioend(bp, 1);
xfs_buf_rele(bp);
}


@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);


@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
if (!XFS_BUF_ISSTALE(bp)) {
bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
xfs_bdstrat_cb(bp);
xfs_buf_iorequest(bp);
} else {
xfs_buf_relse(bp);
}


@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
{
if (dev)
dev->cma_area = cma;
if (!dev || !dma_contiguous_default_area)
if (!dev && !dma_contiguous_default_area)
dma_contiguous_default_area = cma;
}


@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (!ct || !nf_ct_is_untracked(ct)) {
nf_reset(skb);
nf_conntrack_put(skb->nfct);
skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);


@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
if (net->ct.nf_conntrack_event_cb == NULL)
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
e = nf_ct_ecache_find(ct);


@ -745,6 +745,7 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(&timekeeper, false);
write_sequnlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();


@ -5635,7 +5635,12 @@ static struct page *
__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
int **resultp)
{
return alloc_page(GFP_HIGHUSER_MOVABLE);
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
if (PageHighMem(page))
gfp_mask |= __GFP_HIGHMEM;
return alloc_page(gfp_mask);
}
/* [start, end) must belong to a single zone. */


@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
break;
case NETDEV_DOWN:
if (dev->features & NETIF_F_HW_VLAN_FILTER)
vlan_vid_del(dev, 0);
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_N_VID; i++) {
vlandev = vlan_group_get_device(grp, i);


@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
case AX25_P_NETROM:
if (ax25_protocol_is_registered(AX25_P_NETROM))
return -ESOCKTNOSUPPORT;
break;
#endif
#ifdef CONFIG_ROSE_MODULE
case AX25_P_ROSE:


@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
* @is_bcast: the packet came in a broadcast packet type.
*
* bla_rx avoidance checks if:
* * we have to race for a claim
@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
* process the skb.
*
*/
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
bool is_bcast)
{
struct ethhdr *ethhdr;
struct claim search_claim, *claim = NULL;
@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
/* don't allow broadcasts while requests are in flight */
if (is_multicast_ether_addr(ethhdr->h_dest))
if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
goto handled;
memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
}
/* if it is a broadcast ... */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
/* ... drop it. the responsible gateway is in charge. */
if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
/* ... drop it. the responsible gateway is in charge.
*
* We need to check is_bcast because with the gateway
* feature, broadcasts (like DHCP requests) may be sent
* using a unicast packet type.
*/
goto handled;
} else {
/* seems the client considers us as its best gateway.


@ -23,7 +23,8 @@
#define _NET_BATMAN_ADV_BLA_H_
#ifdef CONFIG_BATMAN_ADV_BLA
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
bool is_bcast);
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
int bla_is_backbone_gw(struct sk_buff *skb,
struct orig_node *orig_node, int hdr_size);
@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
#else /* ifdef CONFIG_BATMAN_ADV_BLA */
static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
short vid)
short vid, bool is_bcast)
{
return 0;
}


@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
struct batman_header *batadv_header = (struct batman_header *)skb->data;
short vid __maybe_unused = -1;
bool is_bcast;
is_bcast = (batadv_header->packet_type == BAT_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
/* Let the bridge loop avoidance check the packet. If will
* not handle it, we can safely push it up.
*/
if (bla_rx(bat_priv, skb, vid))
if (bla_rx(bat_priv, skb, vid, is_bcast))
goto out;
netif_rx(skb);


@ -561,9 +561,9 @@ static int __init caif_device_init(void)
static void __exit caif_device_exit(void)
{
unregister_pernet_subsys(&caif_net_ops);
unregister_netdevice_notifier(&caif_device_notifier);
dev_remove_pack(&caif_packet_type);
unregister_pernet_subsys(&caif_net_ops);
}
module_init(caif_device_init);


@ -2444,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
{
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
if ((!skb->priority) && (skb->sk) && map)
skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
if (!skb->priority && skb->sk && map) {
unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
if (prioidx < map->priomap_len)
skb->priority = map->priomap[prioidx];
}
}
#else
#define skb_update_prio(skb)
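
This hunk and the cgroup-side ones below enforce a single guard, sketched here with a mirrored struct: a device's priomap may legitimately be shorter than the highest allocated prioidx, since tables are extended lazily, so every read must bounds-check before indexing and treat a missing or short table as priority 0.

#include <linux/types.h>

struct priomap_like {		/* illustrative mirror of netprio_map */
	u32 priomap_len;
	u32 priomap[];
};

static u32 prio_lookup(const struct priomap_like *map, u32 idx)
{
	/* missing table and short table both mean "default priority" */
	return (map && idx < map->priomap_len) ? map->priomap[idx] : 0;
}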


@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
return -ENOSPC;
}
set_bit(prioidx, prioidx_map);
if (atomic_read(&max_prioidx) < prioidx)
atomic_set(&max_prioidx, prioidx);
spin_unlock_irqrestore(&prioidx_map_lock, flags);
atomic_set(&max_prioidx, prioidx);
*prio = prioidx;
return 0;
}
@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
spin_unlock_irqrestore(&prioidx_map_lock, flags);
}
static void extend_netdev_table(struct net_device *dev, u32 new_len)
static int extend_netdev_table(struct net_device *dev, u32 new_len)
{
size_t new_size = sizeof(struct netprio_map) +
((sizeof(u32) * new_len));
@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
if (!new_priomap) {
pr_warn("Unable to alloc new priomap!\n");
return;
return -ENOMEM;
}
for (i = 0;
@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
rcu_assign_pointer(dev->priomap, new_priomap);
if (old_priomap)
kfree_rcu(old_priomap, rcu);
return 0;
}
static void update_netdev_tables(void)
static int write_update_netdev_table(struct net_device *dev)
{
struct net_device *dev;
u32 max_len = atomic_read(&max_prioidx) + 1;
int ret = 0;
u32 max_len;
struct netprio_map *map;
rtnl_lock();
max_len = atomic_read(&max_prioidx) + 1;
map = rtnl_dereference(dev->priomap);
if (!map || map->priomap_len < max_len)
ret = extend_netdev_table(dev, max_len);
rtnl_unlock();
return ret;
}
static int update_netdev_tables(void)
{
int ret = 0;
struct net_device *dev;
u32 max_len;
struct netprio_map *map;
rtnl_lock();
max_len = atomic_read(&max_prioidx) + 1;
for_each_netdev(&init_net, dev) {
map = rtnl_dereference(dev->priomap);
if ((!map) ||
(map->priomap_len < max_len))
extend_netdev_table(dev, max_len);
/*
* don't allocate priomap if we didn't
* change net_prio.ifpriomap (map == NULL),
* this will speed up skb_update_prio.
*/
if (map && map->priomap_len < max_len) {
ret = extend_netdev_table(dev, max_len);
if (ret < 0)
break;
}
}
rtnl_unlock();
return ret;
}
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
{
struct cgroup_netprio_state *cs;
int ret;
int ret = -EINVAL;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
kfree(cs);
return ERR_PTR(-EINVAL);
}
if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
goto out;
ret = get_prioidx(&cs->prioidx);
if (ret != 0) {
if (ret < 0) {
pr_warn("No space in priority index array\n");
kfree(cs);
return ERR_PTR(ret);
goto out;
}
ret = update_netdev_tables();
if (ret < 0) {
put_prioidx(cs->prioidx);
goto out;
}
return &cs->css;
out:
kfree(cs);
return ERR_PTR(ret);
}
static void cgrp_destroy(struct cgroup *cgrp)
@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
rtnl_lock();
for_each_netdev(&init_net, dev) {
map = rtnl_dereference(dev->priomap);
if (map)
if (map && cs->prioidx < map->priomap_len)
map->priomap[cs->prioidx] = 0;
}
rtnl_unlock();
@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
map = rcu_dereference(dev->priomap);
priority = map ? map->priomap[prioidx] : 0;
priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
cb->fill(cb, dev->name, priority);
}
rcu_read_unlock();
@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
if (!dev)
goto out_free_devname;
update_netdev_tables();
ret = 0;
ret = write_update_netdev_table(dev);
if (ret < 0)
goto out_put_dev;
rcu_read_lock();
map = rcu_dereference(dev->priomap);
if (map)
map->priomap[prioidx] = priority;
rcu_read_unlock();
out_put_dev:
dev_put(dev);
out_free_devname:


@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
void *data = netdev_alloc_frag(fragsz);
if (likely(data)) {


@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
mtu = dev->mtu;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
pr_debug("size = %Zu, mtu = %u\n", size, mtu);
err = -EINVAL;
goto out_dev;
}
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
if (err < 0)
goto out_skb;
if (size > mtu) {
pr_debug("size = %Zu, mtu = %u\n", size, mtu);
err = -EINVAL;
goto out_skb;
}
skb->dev = dev;
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);


@ -2174,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->sa, status_code);
ieee80211_destroy_assoc_data(sdata, false);
} else {
printk(KERN_DEBUG "%s: associated\n", sdata->name);
if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
/* oops -- internal error -- send timeout for now */
ieee80211_destroy_assoc_data(sdata, true);
sta_info_destroy_addr(sdata, mgmt->bssid);
ieee80211_destroy_assoc_data(sdata, false);
cfg80211_put_bss(*bss);
return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
}
printk(KERN_DEBUG "%s: associated\n", sdata->name);
/*
* destroy assoc_data afterwards, as otherwise an idle


@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
max_rates = sband->n_bitrates;
}
msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
msp = kzalloc(sizeof(*msp), gfp);
if (!msp)
return NULL;


@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
unsigned int idx;
if (event != NETDEV_UNREGISTER)
if (event != NETDEV_UNREGISTER || !ipvs)
return NOTIFY_DONE;
IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
EnterFunction(2);
@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
}
}
list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
__ip_vs_dev_reset(dest, dev);
}
mutex_unlock(&__ip_vs_mutex);


@ -16,6 +16,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
info->del_set.flags, 0, UINT_MAX);
/* Normalize to fit into jiffies */
if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
if (add_opt.timeout != IPSET_NO_TIMEOUT &&
add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);


@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
pr_debug("%p\n", sk);
if (llcp_sock == NULL)
if (llcp_sock == NULL || llcp_sock->dev == NULL)
return -EBADFD;
addr->sa_family = AF_NFC;


@ -229,7 +229,7 @@ found_UDP_peer:
return peer;
new_UDP_peer:
_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
_net("Rx UDP DGRAM from NEW peer");
read_unlock_bh(&rxrpc_peer_lock);
_leave(" = -EBUSY [new]");
return ERR_PTR(-EBUSY);


@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
return PSCHED_NS2TICKS(ticks);
}
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct sk_buff_head *list = &sch->q;
psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
struct sk_buff *skb;
struct sk_buff *skb = skb_peek_tail(list);
if (likely(skb_queue_len(list) < sch->limit)) {
skb = skb_peek_tail(list);
/* Optimize for add at tail */
if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
return qdisc_enqueue_tail(nskb, sch);
/* Optimize for add at tail */
if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
return __skb_queue_tail(list, nskb);
skb_queue_reverse_walk(list, skb) {
if (tnext >= netem_skb_cb(skb)->time_to_send)
break;
}
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
return NET_XMIT_SUCCESS;
skb_queue_reverse_walk(list, skb) {
if (tnext >= netem_skb_cb(skb)->time_to_send)
break;
}
return qdisc_reshape_fail(nskb, sch);
__skb_queue_after(list, skb, nskb);
}
/*
@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
int ret;
int count = 1;
/* Random duplication */
@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
return qdisc_reshape_fail(skb, sch);
sch->qstats.backlog += qdisc_pkt_len(skb);
cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */
q->counter < q->gap - 1 || /* inside last reordering gap */
@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cb->time_to_send = now + delay;
++q->counter;
ret = tfifo_enqueue(skb, sch);
tfifo_enqueue(skb, sch);
} else {
/*
* Do re-ordering by putting one out of N packets at the front
@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->counter = 0;
__skb_queue_head(&sch->q, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
sch->qstats.requeues++;
ret = NET_XMIT_SUCCESS;
}
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
return ret;
}
}
return NET_XMIT_SUCCESS;


@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
sch->qstats.backlog = q->qdisc->qstats.backlog;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
return nla_nest_end(skb, opts);


@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base;
if (hlist_unhashed(&epb->node))
return;
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
__hlist_del(&epb->node);
hlist_del_init(&epb->node);
sctp_write_unlock(&head->lock);
}
@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
__hlist_del(&epb->node);
hlist_del_init(&epb->node);
sctp_write_unlock(&head->lock);
}
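
Both hunks swap __hlist_del() for hlist_del_init(), which makes unhashing idempotent: __hlist_del() leaves the node's pointers dangling, so a second removal corrupts the chain, while hlist_del_init() re-initialises the node and (as sketched below from its linux/list.h definition) skips nodes that are already unhashed. That is also why the explicit hlist_unhashed() early-return above could go away, and why the socket.c hunks that follow may unhash "just in case".

#include <linux/list.h>

/* the shape of the helper relied on here, as defined in linux/list.h */
static inline void hlist_del_init_sketch(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {	/* no-op on a re-initialised node */
		__hlist_del(n);
		INIT_HLIST_NODE(n);	/* safe to delete again later */
	}
}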


@ -1231,8 +1231,14 @@ out_free:
SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
" kaddrs: %p err: %d\n",
asoc, kaddrs, err);
if (asoc)
if (asoc) {
/* sctp_primitive_ASSOCIATE may have added this association
* To the hash table, try to unhash it, just in case, its a noop
* if it wasn't hashed so we're safe
*/
sctp_unhash_established(asoc);
sctp_association_free(asoc);
}
return err;
}
@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
out_free:
if (new_asoc)
if (new_asoc) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
}
out_unlock:
sctp_release_sock(sk);