Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Must perform TXQ teardown before unregistering interfaces in
    mac80211, from Toke Høiland-Jørgensen.

 2) Don't allow creating mac80211_hwsim with less than one channel, from
    Johannes Berg.

 3) Division by zero in cfg80211, fix from Johannes Berg.

 4) Fix endian issue in tipc, from Haiqing Bai.

 5) BPF sockmap use-after-free fixes from Daniel Borkmann.

 6) Spectre-v1 fix in mac80211_hwsim, from Jinbum Park.

 7) Missing rhashtable_walk_exit() in tipc, from Cong Wang.

 8) Revert kvzalloc() conversion of AF_PACKET, it breaks mmap() when
    kvzalloc() tries to use kmalloc() pages (see the sketch after this
    list). From Eric Dumazet.

 9) Fix deadlock in hv_netvsc, from Dexuan Cui.

10) Do not restart timewait timer on RST, from Florian Westphal.

11) Fix double lwstate refcount grab in ipv6, from Alexey Kodanev.

12) Unsolicited report count handling is off-by-one, fix from Hangbin Liu.

13) Sleep-in-atomic in cadence driver, from Jia-Ju Bai.

14) Respect ttl-inherit in ip6 tunnel driver, from Hangbin Liu.

15) Use-after-free in act_ife, fix from Cong Wang.

16) Missing reference hold on the meta module in act_ife, from Vlad Buslov.
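
A short sketch of the failure mode behind item 8: kvzalloc() tries
kmalloc() first and only falls back to vmalloc(), but slab-backed
(kmalloc) pages must not be inserted into userspace page tables the
way the AF_PACKET ring's mmap() path does. The kernel APIs below are
real; the wrapper itself is hypothetical:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: allocate a buffer that is safe to mmap() page
 * by page. kvzalloc() may return kmalloc memory, for which
 * vm_insert_page() is invalid, so detect it with is_vmalloc_addr(). */
static void *mmap_safe_zalloc(size_t size)
{
	void *buf = kvzalloc(size, GFP_KERNEL);

	if (buf && !is_vmalloc_addr(buf)) {
		kvfree(buf);		/* slab-backed: not mappable */
		return vzalloc(size);	/* force page-backed memory */
	}
	return buf;
}

The revert simply goes back to AF_PACKET's original page-based ring
allocator rather than special-casing kvzalloc()'s two backends.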

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (91 commits)
  net: phy: sfp: Handle unimplemented hwmon limits and alarms
  net: sched: action_ife: take reference to meta module
  act_ife: fix a potential use-after-free
  net/mlx5: Fix SQ offset in QPs with small RQ
  tipc: correct spelling errors for tipc_topsrv_queue_evt() comments
  tipc: correct spelling errors for struct tipc_bc_base's comment
  bnxt_en: Do not adjust max_cp_rings by the ones used by RDMA.
  bnxt_en: Clean up unused functions.
  bnxt_en: Fix firmware signaled resource change logic in open.
  sctp: not traverse asoc trans list if non-ipv6 trans exists for ipv6_flowlabel
  sctp: fix invalid reference to the index variable of the iterator
  net/ibm/emac: wrong emac_calc_base call was used by typo
  net: sched: null actions array pointer before releasing action
  vhost: fix VHOST_GET_BACKEND_FEATURES ioctl request definition
  r8169: add support for NCube 8168 network card
  ip6_tunnel: respect ttl inherit for ip6tnl
  mac80211: shorten the IBSS debug messages
  mac80211: don't Tx a deauth frame if the AP forbade Tx
  mac80211: Fix station bandwidth setting after channel switch
  mac80211: fix a race between restart and CSA flows
  ...
Merged by Linus Torvalds on 2018-09-04 12:45:11 -07:00, commit 28619527b8.
89 changed files with 953 additions and 464 deletions.


@@ -19,6 +19,10 @@ Required properties:
- slaves : Specifies number for slaves
- active_slave : Specifies the slave to use for time stamping,
ethtool and SIOCGMIIPHY
- cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection
device. See also cpsw-phy-sel.txt for its binding.
Note that in legacy cases cpsw-phy-sel may be
a child device instead of a phandle.
Optional properties:
- ti,hwmods : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
syscon = <&cm>;
cpsw-phy-sel = <&phy_sel>;
cpsw_emac0: slave@0 {
phy_id = <&davinci_mdio>, <0>;
phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
syscon = <&cm>;
cpsw-phy-sel = <&phy_sel>;
cpsw_emac0: slave@0 {
phy_id = <&davinci_mdio>, <0>;
phy-mode = "rgmii-txid";


@@ -16,6 +16,7 @@ Required properties:
"renesas,ether-r8a7794" if the device is a part of R8A7794 SoC.
"renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
"renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
"renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
"renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
"renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
device.


@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
return bp->hw_resc.max_cp_rings;
}
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
bp->hw_resc.max_cp_rings = max;
return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
}
unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
return rc;
}
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
*max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
hw_resc->max_irqs);
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (bp->tx_nr_rings)
return 0;
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
return rc;
goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
if (rc)
return rc;
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
bp->dev->features |= NETIF_F_NTUPLE;
}
return 0;
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
}
int bnxt_restore_pf_fw_resources(struct bnxt *bp)
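
The bnxt_en hunks above replace a mutate-and-restore scheme, where
bnxt_set_max_func_cp_rings() shrank the cached maximum whenever RDMA
borrowed MSI-X vectors, with bnxt_get_max_func_cp_rings_for_en(),
which derives the Ethernet-usable count on demand and leaves the
firmware-reported value untouched. The shape of that change, as a
sketch with hypothetical field names:

struct hw_res {
	unsigned int max_cp_rings;	/* as reported by firmware */
	unsigned int msix_lent_to_rdma;	/* borrowed by the RDMA driver */
};

/* Derive instead of mutate: computed state cannot drift, so no
 * "restore" step is needed when the RDMA driver gives vectors back. */
static unsigned int cp_rings_for_ethernet(const struct hw_res *res)
{
	return res->max_cp_rings - res->msix_lent_to_rdma;
}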


@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);


@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
max_stat_ctxs = hw_resc->max_stat_ctxs;
/* Remaining rings are distributed equally amongst VFs for now */
vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
bp->cp_nr_rings) / num_vfs;
vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
avail_cp = min_t(int, avail_cp, avail_stat);


@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return avail_msix;
}
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
int max_cp_rings, msix_requested;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return 0;
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
return 0;
}
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
{
ASSERT_RTNL();
if (bnxt_ulp_registered(bp->edev, ulp_id)) {
struct bnxt_en_dev *edev = bp->edev;
unsigned int msix_req, max;
msix_req = edev->ulp_tbl[ulp_id].msix_requested;
max = bnxt_get_max_func_cp_rings(bp);
bnxt_set_max_func_cp_rings(bp, max - msix_req);
max = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max - 1);
}
}
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_fw_msg *fw_msg)
{


@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);


@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
#define UMAC_MODE 0x44
#define MODE_LINK_STATUS (1 << 5)
#define UMAC_EEE_CTRL 0x064
#define EN_LPI_RX_PAUSE (1 << 0)
#define EN_LPI_TX_PFC (1 << 1)


@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
struct fixed_phy_status *status)
{
if (dev && dev->phydev && status)
status->link = dev->phydev->link;
struct bcmgenet_priv *priv;
u32 reg;
if (dev && dev->phydev && status) {
priv = netdev_priv(dev);
reg = bcmgenet_umac_readl(priv, UMAC_MODE);
status->link = !!(reg & MODE_LINK_STATUS);
}
return 0;
}


@@ -649,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
if (!(status & MACB_BIT(TGO)))
return 0;
usleep_range(10, 250);
udelay(250);
} while (time_before(halt_time, timeout));
return -ETIMEDOUT;
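
That one-line swap is the whole sleep-in-atomic fix from item 13:
macb_halt_tx() can be reached with a spinlock held, where
usleep_range() may schedule but udelay() only busy-waits. The rule in
miniature (a self-contained sketch, not the macb code):

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(poll_lock);

static void poll_hw_in_atomic_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&poll_lock, flags);
	/* usleep_range(10, 250) here could schedule and trigger a
	 * "scheduling while atomic" splat; udelay() busy-waits and is
	 * the only legal delay while the lock is held. */
	udelay(250);
	spin_unlock_irqrestore(&poll_lock, flags);
}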


@@ -486,6 +486,8 @@ struct hnae_ae_ops {
u8 *auto_neg, u16 *speed, u8 *duplex);
void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
bool (*need_adjust_link)(struct hnae_handle *handle,
int speed, int duplex);
int (*set_loopback)(struct hnae_handle *handle,
enum hnae_loop loop_mode, int en);
void (*get_ring_bdnum_limit)(struct hnae_queue *queue,


@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
}
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
{
struct dsaf_device *dsaf_dev;
struct hns_ppe_cb *ppe_cb;
struct hnae_vf_cb *vf_cb;
int ret;
int i;
for (i = 0; i < handle->q_num; i++) {
ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
if (ret)
return ret;
}
ppe_cb = hns_get_ppe_cb(handle);
ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
if (ret)
return ret;
dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
if (!dsaf_dev)
return -EINVAL;
ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
if (ret)
return ret;
vf_cb = hns_ae_get_vf_cb(handle);
ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
if (ret)
return ret;
mdelay(10);
return 0;
}
static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
{
int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
}
static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
int duplex)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
return hns_mac_need_adjust_link(mac_cb, speed, duplex);
}
static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
int duplex)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
hns_mac_adjust_link(mac_cb, speed, duplex);
switch (mac_cb->dsaf_dev->dsaf_ver) {
case AE_VERSION_1:
hns_mac_adjust_link(mac_cb, speed, duplex);
break;
case AE_VERSION_2:
/* chip need to clear all pkt inside */
hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
if (hns_ae_wait_flow_down(handle)) {
hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
break;
}
hns_mac_adjust_link(mac_cb, speed, duplex);
hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
break;
default:
break;
}
return;
}
static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_status = hns_ae_get_link_status,
.get_info = hns_ae_get_mac_info,
.adjust_link = hns_ae_adjust_link,
.need_adjust_link = hns_ae_need_adjust_link,
.set_loopback = hns_ae_config_loopback,
.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
.get_pauseparam = hns_ae_get_pauseparam,


@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
*tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
}
static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
int duplex)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
struct hns_mac_cb *mac_cb = drv->mac_cb;
return (mac_cb->speed != speed) ||
(mac_cb->half_duplex == duplex);
}
static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
u32 full_duplex)
{
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
hns_gmac_set_uc_match(mac_drv, en);
}
int hns_gmac_wait_fifo_clean(void *mac_drv)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
int wait_cnt;
u32 val;
wait_cnt = 0;
while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
/* bit5~bit0 count the pkts not yet sent */
if ((val & 0x3f) == 0)
break;
usleep_range(100, 200);
}
if (wait_cnt >= HNS_MAX_WAIT_CNT) {
dev_err(drv->dev,
"hns ge %d fifo was not idle.\n", drv->mac_id);
return -EBUSY;
}
return 0;
}
static void hns_gmac_init(void *mac_drv)
{
u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->mac_disable = hns_gmac_disable;
mac_drv->mac_free = hns_gmac_free;
mac_drv->adjust_link = hns_gmac_adjust_link;
mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->get_strings = hns_gmac_get_strings;
mac_drv->update_stats = hns_gmac_update_stats;
mac_drv->set_promiscuous = hns_gmac_set_promisc;
mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
return (void *)mac_drv;
}


@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
return 0;
}
/**
*hns_mac_need_adjust_link - check whether the mac speed and duplex registers need updating
*@mac_cb: mac device
*@speed: phy device speed
*@duplex:phy device duplex
*
*/
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
{
struct mac_driver *mac_ctrl_drv;
mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
if (mac_ctrl_drv->need_adjust_link)
return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
(enum mac_speed)speed, duplex);
else
return true;
}
void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
{
int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
return 0;
}
int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
if (drv->wait_fifo_clean)
return drv->wait_fifo_clean(drv);
return 0;
}
void hns_mac_reset(struct hns_mac_cb *mac_cb)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
return DSAF_MAX_PORT_NUM;
}
void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
}
void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
}
/**
* hns_mac_init - init mac
* @dsaf_dev: dsa fabric device struct pointer


@@ -356,6 +356,9 @@ struct mac_driver {
/*adjust mac mode of port,include speed and duplex*/
int (*adjust_link)(void *mac_drv, enum mac_speed speed,
u32 full_duplex);
/* need adjust link */
bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
int duplex);
/* config auto-negotiation mode of port */
void (*set_an_mode)(void *mac_drv, u8 enable);
/* config loopback mode */
@@ -394,6 +397,7 @@ struct mac_driver {
void (*get_info)(void *mac_drv, struct mac_info *mac_info);
void (*update_stats)(void *mac_drv);
int (*wait_fifo_clean)(void *mac_drv);
enum mac_mode mac_mode;
u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
int hns_mac_init(struct dsaf_device *dsaf_dev);
void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
const unsigned char *addr);
int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
#endif /* _HNS_DSAF_MAC_H */


@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
}
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
{
u32 val, val_tmp;
int wait_cnt;
if (port >= DSAF_SERVICE_NW_NUM)
return 0;
wait_cnt = 0;
while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
(port + DSAF_XGE_NUM) * 0x40);
val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
(port + DSAF_XGE_NUM) * 0x40);
if (val == val_tmp)
break;
usleep_range(100, 200);
}
if (wait_cnt >= HNS_MAX_WAIT_CNT) {
dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
val, val_tmp);
return -EBUSY;
}
return 0;
}
/**
* dsaf_probe - probe dsaf dev
* @pdev: dsaf platform device


@@ -44,6 +44,8 @@ struct hns_mac_cb;
#define DSAF_ROCE_CREDIT_CHN 8
#define DSAF_ROCE_CHAN_MODE 3
#define HNS_MAX_WAIT_CNT 10000
enum dsaf_roce_port_mode {
DSAF_ROCE_6PORT_MODE,
DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
#endif /* __HNS_DSAF_MAIN_H__ */


@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
}
int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
{
int wait_cnt;
u32 val;
wait_cnt = 0;
while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
if (!val)
break;
usleep_range(100, 200);
}
if (wait_cnt >= HNS_MAX_WAIT_CNT) {
dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
val);
return -EBUSY;
}
return 0;
}
/**
* ppe_init_hw - init ppe
* @ppe_cb: ppe device


@@ -100,6 +100,7 @@ struct ppe_common_cb {
};
int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
int hns_ppe_init(struct dsaf_device *dsaf_dev);
void hns_ppe_uninit(struct dsaf_device *dsaf_dev);


@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}
int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
{
u32 head, tail;
int wait_cnt;
tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
wait_cnt = 0;
while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
if (tail == head)
break;
usleep_range(100, 200);
}
if (wait_cnt >= HNS_MAX_WAIT_CNT) {
dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
return -EBUSY;
}
return 0;
}
/**
*hns_rcb_reset_ring_hw - ring reset
*@q: ring struct pointer


@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
u32 hns_rcb_get_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_tx_coalesced_frames(


@@ -464,6 +464,7 @@
#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
#define GMAC_FIFO_STATE_REG 0x0000UL
#define GMAC_DUPLEX_TYPE_REG 0x0008UL
#define GMAC_FD_FC_TYPE_REG 0x000CUL
#define GMAC_TX_WATER_LINE_REG 0x0010UL


@@ -1112,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
struct hnae_handle *h = priv->ae_handle;
int state = 1;
/* If there is no phy, do not need adjust link */
if (ndev->phydev) {
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
state = ndev->phydev->link;
/* When phy link down, do nothing */
if (ndev->phydev->link == 0)
return;
if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex)) {
/* because the Hi161X chip doesn't support changing gmac
* speed and duplex while traffic is flowing. Delay 200ms
* to make sure there is no more data in the chip FIFO.
*/
netif_carrier_off(ndev);
msleep(200);
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
netif_carrier_on(ndev);
}
}
state = state && h->dev->ops->get_status(h);
if (state != priv->link) {


@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
}
if (h->dev->ops->adjust_link) {
netif_carrier_off(net_dev);
h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
netif_carrier_on(net_dev);
return 0;
}


@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
case 8192:
ret |= EMAC4_MR1_RFS_8K;
break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_RFS_16K;
break;
case 8192:
ret |= EMAC4_MR1_RFS_8K;
break;
case 4096:
ret |= EMAC4_MR1_RFS_4K;
break;


@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->map_id = 1;
release_rx_pools(adapter);
release_tx_pools(adapter);
init_rx_pools(netdev);
init_tx_pools(netdev);
rc = init_rx_pools(netdev);
if (rc)
return rc;
rc = init_tx_pools(netdev);
if (rc)
return rc;
release_napi(adapter);
init_napi(adapter);
rc = init_napi(adapter);
if (rc)
return rc;
} else {
rc = reset_tx_pools(adapter);
if (rc)


@@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
/* Phylink isn't used w/ ACPI as of now */
if (port_node) {


@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
struct mlx5_wq_ctrl *wq_ctrl)
{
u32 sq_strides_offset;
u32 rq_pg_remainder;
int err;
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
MLX5_GET(qpc, qpc, log_rq_size),
&wq->rq.fbc);
sq_strides_offset =
((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
MLX5_GET(qpc, qpc, log_sq_size),
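
The bug was computing the SQ start offset from a single RQ fragment
instead of the RQ's total byte footprint; the two differ exactly when
the RQ is smaller than a page. A worked example with the usual
constants (PAGE_SIZE = 4096, MLX5_SEND_WQE_BB = 64; the RQ size is
assumed for illustration):

	RQ byte size      = 2048 (a "small RQ", under one page)
	rq_pg_remainder   = 2048 % 4096 = 2048
	sq_strides_offset = 2048 / 64 = 32

so the SQ begins 32 send-WQE basic blocks into the page, immediately
after the RQ, which is what the new rq_pg_remainder computation
guarantees.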


@@ -229,6 +229,45 @@ done:
spin_unlock_bh(&nn->reconfig_lock);
}
static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
bool cancelled_timer = false;
u32 pre_posted_requests;
spin_lock_bh(&nn->reconfig_lock);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
nn->reconfig_timer_active = false;
cancelled_timer = true;
}
pre_posted_requests = nn->reconfig_posted;
nn->reconfig_posted = 0;
spin_unlock_bh(&nn->reconfig_lock);
if (cancelled_timer) {
del_timer_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
}
/* Run the posted reconfigs which were issued before we started */
if (pre_posted_requests) {
nfp_net_reconfig_start(nn, pre_posted_requests);
nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
}
static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
{
nfp_net_reconfig_sync_enter(nn);
spin_lock_bh(&nn->reconfig_lock);
nn->reconfig_sync_present = false;
spin_unlock_bh(&nn->reconfig_lock);
}
/**
* nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
@@ -242,32 +281,9 @@ done:
*/
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
bool cancelled_timer = false;
u32 pre_posted_requests;
int ret;
spin_lock_bh(&nn->reconfig_lock);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
del_timer(&nn->reconfig_timer);
nn->reconfig_timer_active = false;
cancelled_timer = true;
}
pre_posted_requests = nn->reconfig_posted;
nn->reconfig_posted = 0;
spin_unlock_bh(&nn->reconfig_lock);
if (cancelled_timer)
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
/* Run the posted reconfigs which were issued before we started */
if (pre_posted_requests) {
nfp_net_reconfig_start(nn, pre_posted_requests);
nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
nfp_net_reconfig_wait_posted(nn);
}
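
Two things happen in the nfp hunks above: the quiesce sequence moves
into nfp_net_reconfig_sync_enter() so the teardown path
(nfp_net_reconfig_wait_posted(), called from nfp_net_clean()) can
reuse it, and del_timer() becomes del_timer_sync() invoked only after
reconfig_lock is dropped. The latter is the standard pattern for
killing a timer whose handler takes the same lock; a sketch around a
hypothetical structure:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct reconf {
	spinlock_t lock;
	struct timer_list timer;
	bool timer_active;
};

static void quiesce_timer(struct reconf *r)
{
	bool stop;

	spin_lock_bh(&r->lock);
	stop = r->timer_active;		/* claim the timer under the lock */
	r->timer_active = false;
	spin_unlock_bh(&r->lock);

	/* del_timer_sync() waits for a running handler to finish, so it
	 * must not be called while holding a lock the handler takes. */
	if (stop)
		del_timer_sync(&r->timer);
}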


@@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_DLINK, 0x4300,
PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
@@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_hw_reset(tp);
}
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
/* Set DMA burst size and Interframe Gap Time */
RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
rtl_set_rx_tx_config_registers(tp);
rtl_set_tx_config_registers(tp);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);


@@ -798,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
.magic = 1,
.cexcr = 1,
};
/* R7S9210 */
static struct sh_eth_cpu_data r7s9210_data = {
.soft_reset = sh_eth_soft_reset,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_rcar,
.register_type = SH_ETH_REG_FAST_SH4,
.edtrr_trns = EDTRR_TRNS_ETHER,
.ecsr_value = ECSR_ICD,
.ecsipr_value = ECSIPR_ICDIP,
.eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x0000070f,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
.no_ade = 1,
.xdfar_rw = 1,
};
#endif /* CONFIG_OF */
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3121,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
{ }


@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_SOCFPGA
default (ARCH_SOCFPGA || ARCH_STRATIX10)
depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
select MFD_SYSCON
help


@@ -112,7 +112,6 @@ struct stmmac_priv {
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
bool tx_timer_armed;
int tx_coalesce;
int hwts_tx_en;


@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
priv->tx_count_frames += nfrags + 1;
if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
!priv->tx_timer_armed) {
if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
mod_timer(&priv->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer));
priv->tx_timer_armed = true;
} else {
priv->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
priv->tx_timer_armed = false;
}
skb_tx_timestamp(skb);
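
The revert above drops the tx_timer_armed guard entirely: mod_timer()
already arms a stopped timer and pushes out a pending one in a single
atomic call, and the extra flag could go stale and leave descriptors
without a completion interrupt. The coalescing decision in outline (a
sketch; the last helper stands in for stmmac_set_tx_ic()):

/* Either defer the completion interrupt via the timer or request one
 * on this descriptor -- never neither. */
priv->tx_count_frames += nfrags + 1;
if (priv->tx_coal_frames > priv->tx_count_frames) {
	/* arms or re-arms in one call; no separate "armed" flag */
	mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
} else {
	priv->tx_count_frames = 0;
	request_tx_completion_irq(desc);	/* hypothetical stand-in */
}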


@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
struct device_node *node;
struct cpsw_phy_sel_priv *priv;
node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
if (!node) {
dev_err(dev, "Phy mode driver DT not found\n");
return;
node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
if (!node) {
dev_err(dev, "Phy mode driver DT not found\n");
return;
}
}
dev = bus_find_device(&platform_bus_type, NULL, node, match);


@@ -2206,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* for all subchannels to show up, but that may not happen because
* netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs forever.
*/
rtnl_lock();
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
@@ -2224,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev,
else
net->max_mtu = ETH_DATA_LEN;
rtnl_lock();
ret = register_netdevice(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");


@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
switch (type) {
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
case hwmon_temp_min_alarm:
case hwmon_temp_max_alarm:
case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_temp_max:
case hwmon_temp_lcrit:
case hwmon_temp_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
/* fall through */
case hwmon_temp_input:
return 0444;
default:
return 0;
}
case hwmon_in:
switch (attr) {
case hwmon_in_input:
case hwmon_in_min_alarm:
case hwmon_in_max_alarm:
case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_in_max:
case hwmon_in_lcrit:
case hwmon_in_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
/* fall through */
case hwmon_in_input:
return 0444;
default:
return 0;
}
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
case hwmon_curr_min_alarm:
case hwmon_curr_max_alarm:
case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_curr_max:
case hwmon_curr_lcrit:
case hwmon_curr_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
/* fall through */
case hwmon_curr_input:
return 0444;
default:
return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
channel == 1)
return 0;
switch (attr) {
case hwmon_power_input:
case hwmon_power_min_alarm:
case hwmon_power_max_alarm:
case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_power_max:
case hwmon_power_lcrit:
case hwmon_power_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
/* fall through */
case hwmon_power_input:
return 0444;
default:
return 0;
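
Every switch in the sfp hunk now has the same shape: alarm and limit
attributes return 0 (hidden) unless the module advertises
SFP_ENHOPTS_ALARMWARN, then fall through so the *_input reading stays
visible unconditionally. The idiom in isolation (hwmon constants as in
<linux/hwmon.h>; the callback itself is a toy):

#include <linux/hwmon.h>

static umode_t toy_is_visible(u32 attr, bool has_alarm_warn)
{
	switch (attr) {
	case hwmon_temp_min_alarm:
	case hwmon_temp_max_alarm:
	case hwmon_temp_min:
	case hwmon_temp_max:
		if (!has_alarm_warn)
			return 0;	/* hide unimplemented limits */
		/* fall through */
	case hwmon_temp_input:
		return 0444;		/* live reading: always readable */
	default:
		return 0;
	}
}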


@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd, *copy_rd;
int size_of_regd, regd_to_copy, wmms_to_copy;
int size_of_wmms = 0;
int size_of_regd, regd_to_copy;
struct ieee80211_reg_rule *rule;
struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
struct regdb_ptrs *regdb_ptrs;
enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0, n_wmms = 0;
int i;
int valid_rules = 0;
bool new_rule;
int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
sizeof(struct ieee80211_regdomain) +
num_of_ch * sizeof(struct ieee80211_reg_rule);
if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
size_of_wmms =
num_of_ch * sizeof(struct ieee80211_wmm_rule);
regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
regd = kzalloc(size_of_regd, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd->alpha2[0] = fw_mcc >> 8;
regd->alpha2[1] = fw_mcc & 0xff;
wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
band == NL80211_BAND_2GHZ)
continue;
if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
&regdb_ptrs[n_wmms].token, wmm_rule)) {
/* Add only new rules */
for (i = 0; i < n_wmms; i++) {
if (regdb_ptrs[i].token ==
regdb_ptrs[n_wmms].token) {
rule->wmm_rule = regdb_ptrs[i].rule;
break;
}
}
if (i == n_wmms) {
rule->wmm_rule = wmm_rule;
regdb_ptrs[n_wmms++].rule = wmm_rule;
wmm_rule++;
}
}
reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
}
regd->n_reg_rules = valid_rules;
regd->n_wmm_rules = n_wmms;
/*
* Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd_to_copy = sizeof(struct ieee80211_regdomain) +
valid_rules * sizeof(struct ieee80211_reg_rule);
wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
if (!copy_rd) {
copy_rd = ERR_PTR(-ENOMEM);
goto out;
}
memcpy(copy_rd, regd, regd_to_copy);
memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
wmms_to_copy);
d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
for (i = 0; i < regd->n_reg_rules; i++) {
if (!regd->reg_rules[i].wmm_rule)
continue;
copy_rd->reg_rules[i].wmm_rule = d_wmm +
(regd->reg_rules[i].wmm_rule - s_wmm);
}
out:
kfree(regdb_ptrs);


@@ -34,6 +34,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
#include <linux/nospec.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_RXSTBC_2 |
IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
if (param.channels < 1) {
GENL_SET_ERR_MSG(info, "must have at least one channel");
return -EINVAL;
}
if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
GENL_SET_ERR_MSG(info, "too many channels specified");
return -EINVAL;
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
kfree(hwname);
return -EINVAL;
}
idx = array_index_nospec(idx,
ARRAY_SIZE(hwsim_world_regdom_custom));
param.regd = hwsim_world_regdom_custom[idx];
}
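
The array_index_nospec() call above is the canonical Spectre-v1
mitigation from item 6: a bounds check alone does not stop speculative
execution from indexing past the array and leaking the result through
the cache, so the index is clamped without a branch. A minimal sketch,
not the hwsim code itself:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>

static const int regd_table[8];

static int pick(unsigned long idx)
{
	if (idx >= ARRAY_SIZE(regd_table))
		return -EINVAL;
	/* clamp idx even under speculation before it touches memory */
	idx = array_index_nospec(idx, ARRAY_SIZE(regd_table));
	return regd_table[idx];
}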


@@ -3084,4 +3084,6 @@
#define PCI_VENDOR_ID_OCZ 0x1b85
#define PCI_VENDOR_ID_NCUBE 0x10ff
#endif /* _LINUX_PCI_IDS_H */


@@ -4865,8 +4865,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* Return: 0 on success. -ENODATA.
*/
int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
struct ieee80211_wmm_rule *rule);
int reg_query_regdb_wmm(char *alpha2, int freq,
struct ieee80211_reg_rule *rule);
/*
* callbacks for asynchronous cfg80211 methods, notification


@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
struct ieee80211_reg_rule {
struct ieee80211_freq_range freq_range;
struct ieee80211_power_rule power_rule;
struct ieee80211_wmm_rule *wmm_rule;
struct ieee80211_wmm_rule wmm_rule;
u32 flags;
u32 dfs_cac_ms;
bool has_wmm;
};
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
u32 n_wmm_rules;
char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];


@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
#include <linux/in6.h> /* For struct in6_addr. */
#define RDS_IB_ABI_VERSION 0x301


@@ -176,7 +176,7 @@ struct vhost_memory {
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
/* VHOST_NET specific defines */
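
The vhost fix is a one-word ABI correction: _IOW says userspace writes
a value into the kernel, _IOR says it reads one back, and those
direction bits are encoded in the ioctl request number itself, so a
GET defined with _IOW is a different number than the same GET with
_IOR. Illustration with a hypothetical ioctl type 'x':

#include <linux/ioctl.h>
#include <linux/types.h>

/* the direction is baked into the request number */
#define X_SET_FEATURES	_IOW('x', 0x25, __u64)	/* user -> kernel */
#define X_GET_FEATURES	_IOR('x', 0x26, __u64)	/* kernel -> user */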


@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk)
}
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
static void bpf_tcp_release(struct sock *sk)
{
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk)
goto out;
if (psock->cork) {
free_start_sg(psock->sock, psock->cork);
free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
psock->cork = NULL;
}
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
close_fun = psock->save_close;
if (psock->cork) {
free_start_sg(psock->sock, psock->cork);
free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
psock->cork = NULL;
}
list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
list_del(&md->list);
free_start_sg(psock->sock, md);
free_start_sg(psock->sock, md, true);
kfree(md);
}
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
/* If another thread deleted this object skip deletion.
* The refcnt on psock may or may not be zero.
*/
if (l) {
if (l && l == link) {
hlist_del_rcu(&link->hash_node);
smap_release_sock(psock, link->sk);
free_htab_elem(htab, link);
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
md->sg_start = i;
}
static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
static int free_sg(struct sock *sk, int start,
struct sk_msg_buff *md, bool charge)
{
struct scatterlist *sg = md->sg_data;
int i = start, free = 0;
while (sg[i].length) {
free += sg[i].length;
sk_mem_uncharge(sk, sg[i].length);
if (charge)
sk_mem_uncharge(sk, sg[i].length);
if (!md->skb)
put_page(sg_page(&sg[i]));
sg[i].length = 0;
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
return free;
}
static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
{
int free = free_sg(sk, md->sg_start, md);
int free = free_sg(sk, md->sg_start, md, charge);
md->sg_start = md->sg_end;
return free;
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
{
return free_sg(sk, md->sg_curr, md);
return free_sg(sk, md->sg_curr, md, true);
}
static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
list_add_tail(&r->list, &psock->ingress);
sk->sk_data_ready(sk);
} else {
free_start_sg(sk, r);
free_start_sg(sk, r, true);
kfree(r);
}
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
release_sock(sk);
}
smap_release_sock(psock, sk);
if (unlikely(err))
goto out;
return 0;
return err;
out_rcu:
rcu_read_unlock();
out:
free_bytes_sg(NULL, send, md, false);
return err;
return 0;
}
static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +820,7 @@ more_data:
case __SK_PASS:
err = bpf_tcp_push(sk, send, m, flags, true);
if (unlikely(err)) {
*copied -= free_start_sg(sk, m);
*copied -= free_start_sg(sk, m, true);
break;
}
@@ -845,16 +843,17 @@ more_data:
lock_sock(sk);
if (unlikely(err < 0)) {
free_start_sg(sk, m);
int free = free_start_sg(sk, m, false);
psock->sg_size = 0;
if (!cork)
*copied -= send;
*copied -= free;
} else {
psock->sg_size -= send;
}
if (cork) {
free_start_sg(sk, m);
free_start_sg(sk, m, true);
psock->sg_size = 0;
kfree(m);
m = NULL;
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
if (!skb_queue_empty(&sk->sk_receive_queue))
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
rcu_read_lock();
psock = smap_psock_sk(sk);
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out;
rcu_read_unlock();
if (!skb_queue_empty(&sk->sk_receive_queue))
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
lock_sock(sk);
bytes_ready:
while (copied != len) {
@@ -1122,7 +1120,7 @@ wait_for_memory:
err = sk_stream_wait_memory(sk, &timeo);
if (err) {
if (m && m != psock->cork)
free_start_sg(sk, m);
free_start_sg(sk, m, true);
goto out_err;
}
}
@@ -1464,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
schedule_work(&psock->gc_work);
}
static bool psock_is_smap_sk(struct sock *sk)
{
return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
}
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
if (refcount_dec_and_test(&psock->refcnt)) {
tcp_cleanup_ulp(sock);
if (psock_is_smap_sk(sock))
tcp_cleanup_ulp(sock);
write_lock_bh(&sock->sk_callback_lock);
smap_stop_sock(psock, sock);
write_unlock_bh(&sock->sk_callback_lock);
@@ -1581,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w)
bpf_prog_put(psock->bpf_tx_msg);
if (psock->cork) {
free_start_sg(psock->sock, psock->cork);
free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
}
list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
list_del(&md->list);
free_start_sg(psock->sock, md);
free_start_sg(psock->sock, md, true);
kfree(md);
}
@@ -1894,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
* doesn't update user data.
*/
if (psock) {
if (!psock_is_smap_sk(sock)) {
err = -EBUSY;
goto out_progs;
}
if (READ_ONCE(psock->bpf_parse) && parse) {
err = -EBUSY;
goto out_progs;


@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.arg2_type = ARG_ANYTHING,
};
#define sk_msg_iter_var(var) \
do { \
var++; \
if (var == MAX_SKB_FRAGS) \
var = 0; \
} while (0)
BPF_CALL_4(bpf_msg_pull_data,
struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
{
unsigned int len = 0, offset = 0, copy = 0;
unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
int bytes = end - start, bytes_sg_total;
struct scatterlist *sg = msg->sg_data;
int first_sg, last_sg, i, shift;
unsigned char *p, *to, *from;
int bytes = end - start;
struct page *page;
if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
i = msg->sg_start;
do {
len = sg[i].length;
offset += len;
if (start < offset + len)
break;
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
offset += len;
sk_msg_iter_var(i);
} while (i != msg->sg_end);
if (unlikely(start >= offset + len))
return -EINVAL;
if (!msg->sg_copy[i] && bytes <= len)
goto out;
first_sg = i;
/* The start may point into the sg element so we need to also
* account for the headroom.
*/
bytes_sg_total = start - offset + bytes;
if (!msg->sg_copy[i] && bytes_sg_total <= len)
goto out;
/* At this point we need to linearize multiple scatterlist
* elements or a single shared page. Either way we need to
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data,
*/
do {
copy += sg[i].length;
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
if (bytes < copy)
sk_msg_iter_var(i);
if (bytes_sg_total <= copy)
break;
} while (i != msg->sg_end);
last_sg = i;
if (unlikely(copy < end - start))
if (unlikely(bytes_sg_total > copy))
return -EINVAL;
page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
if (unlikely(!page))
return -ENOMEM;
p = page_address(page);
offset = 0;
i = first_sg;
do {
from = sg_virt(&sg[i]);
len = sg[i].length;
to = p + offset;
to = p + poffset;
memcpy(to, from, len);
offset += len;
poffset += len;
sg[i].length = 0;
put_page(sg_page(&sg[i]));
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
sk_msg_iter_var(i);
} while (i != last_sg);
sg[first_sg].length = copy;
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data,
* had a single entry though we can just replace it and
* be done. Otherwise walk the ring and shift the entries.
*/
shift = last_sg - first_sg - 1;
WARN_ON_ONCE(last_sg == first_sg);
shift = last_sg > first_sg ?
last_sg - first_sg - 1 :
MAX_SKB_FRAGS - first_sg + last_sg - 1;
if (!shift)
goto out;
i = first_sg + 1;
i = first_sg;
sk_msg_iter_var(i);
do {
int move_from;
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data,
sg[move_from].page_link = 0;
sg[move_from].offset = 0;
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
sk_msg_iter_var(i);
} while (1);
msg->sg_end -= shift;
if (msg->sg_end < 0)
msg->sg_end += MAX_SKB_FRAGS;
out:
msg->data = sg_virt(&sg[i]) + start - offset;
msg->data = sg_virt(&sg[first_sg]) + start - offset;
msg->data_end = msg->data + bytes;
return 0;
@@ -7281,7 +7286,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct sk_reuseport_md, ip_protocol):
BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
BPF_W, 0);
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
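
Much of the bpf_msg_pull_data() rework above is mechanical: five
hand-rolled "increment and maybe wrap" sites become sk_msg_iter_var(),
because forgetting the wrap at one call site is what corrupted the
scatterlist ring. The idiom on its own (ring size and helpers are
illustrative):

#define RING_SIZE 17	/* stands in for MAX_SKB_FRAGS */

#define ring_iter(var)			\
	do {				\
		(var)++;		\
		if ((var) == RING_SIZE)	\
			(var) = 0;	\
	} while (0)

/* visit every element of the circular range [start, end) */
static void walk(int start, int end, void (*visit)(int))
{
	int i = start;

	while (i != end) {
		visit(i);
		ring_iter(i);
	}
}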


@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
rtnl_lock();
tab = rtnl_msg_handlers[protocol];
if (!tab) {
rtnl_unlock();
return;
}
RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
link = tab[msgindex];
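
The rtnl_unregister_all() hunk is the classic missing-unlock bug: the
new early return had no matching rtnl_unlock(), so the first caller to
hit an empty table would leave the RTNL held forever. In miniature,
every return path after a lock acquisition must release it (generic
sketch, a mutex standing in for the RTNL):

#include <linux/mutex.h>

static DEFINE_MUTEX(tbl_lock);
static void *table[16];

static void drop_entry(int proto)
{
	mutex_lock(&tbl_lock);
	if (!table[proto]) {
		mutex_unlock(&tbl_lock);	/* the line that was missing */
		return;
	}
	table[proto] = NULL;
	mutex_unlock(&tbl_lock);
}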


@@ -19,12 +19,10 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include "dsa_priv.h"


@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
spin_lock(&im->lock);
im->tm_running = 0;
if (im->unsolicit_count) {
im->unsolicit_count--;
if (im->unsolicit_count && --im->unsolicit_count)
igmp_start_timer(im, unsolicited_report_interval(in_dev));
}
im->reporter = 1;
spin_unlock(&im->lock);
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
if (in_dev->dead)
return;
im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
spin_lock_bh(&im->lock);
igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
unsigned int mode)
{
struct ip_mc_list *im;
#ifdef CONFIG_IP_MULTICAST
struct net *net = dev_net(in_dev->dev);
#endif
ASSERT_RTNL();
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
timer_setup(&im->timer, igmp_timer_expire, 0);
im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
#endif
im->next_rcu = in_dev->mc_list;


@@ -1508,11 +1508,14 @@ nla_put_failure:
static void erspan_setup(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
ether_setup(dev);
dev->netdev_ops = &erspan_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, erspan_net_id);
t->erspan_ver = 1;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {


@@ -184,8 +184,9 @@ kill:
inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;
}
} else {
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
}
inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
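
This is item 10 from the summary: inet_twsk_reschedule() used to run
on this path regardless of segment type, so a peer sending periodic
RSTs could keep refreshing TIME_WAIT and pin the port indefinitely;
the rearm now sits on the non-RST branch only. The intended semantics
as a self-contained sketch (types and helpers are placeholders, not
the kernel's):

#include <stdbool.h>

#define TW_TIMEOUT_MS (60 * 1000)	/* TCP_TIMEWAIT_LEN is 60 s */

struct tw_sock {
	long expires_ms;
	bool dead;
};

static void tw_on_segment(struct tw_sock *tw, bool seg_is_rst, long now_ms)
{
	if (seg_is_rst) {
		/* an RST may end TIME_WAIT early (modulo RFC 1337
		 * concerns) but must never extend it */
		tw->dead = true;
		return;
	}
	tw->expires_ms = now_ms + TW_TIMEOUT_MS;	/* rearm only here */
}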


@@ -938,14 +938,14 @@ static int __init inet6_init(void)
err = proto_register(&pingv6_prot, 1);
if (err)
goto out_unregister_ping_proto;
goto out_unregister_raw_proto;
/* We MUST register RAW sockets before we create the ICMP6,
* IGMP6, or NDISC control sockets.
*/
err = rawv6_init();
if (err)
goto out_unregister_raw_proto;
goto out_unregister_ping_proto;
/* Register the family here so that the init calls below will
* be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
igmp_fail:
ndisc_cleanup();
ndisc_fail:
ip6_mr_cleanup();
icmp_fail:
unregister_pernet_subsys(&inet6_net_ops);
ipmr_fail:
icmpv6_cleanup();
icmp_fail:
ip6_mr_cleanup();
ipmr_fail:
unregister_pernet_subsys(&inet6_net_ops);
register_pernet_fail:
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
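
Both inet6_init() hunks restore the same invariant: a goto-unwind
ladder must tear down in exactly the reverse order of initialization,
with each label undoing the step before the one that failed. The
generic shape (init_*/cleanup_* are placeholder stubs):

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void cleanup_a(void) { }
static void cleanup_b(void) { }

static int init_all(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto undo_a;	/* b failed: only a exists so far */
	err = init_c();
	if (err)
		goto undo_b;	/* c failed: undo b, then a */
	return 0;

undo_b:
	cleanup_b();
undo_a:
	cleanup_a();
out:
	return err;
}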


@@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
fib6_clean_expires(iter);
else
fib6_set_expires(iter, rt->expires);
fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
rt->fib6_pmtu);
return -EEXIST;
}
/* If we have the same destination and the same metric,


@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
if (data[IFLA_GRE_COLLECT_METADATA])
parms->collect_md = true;
parms->erspan_ver = 1;
if (data[IFLA_GRE_ERSPAN_VER])
parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);


@@ -1188,7 +1188,15 @@ route_lookup:
init_tel_txopt(&opt, encap_limit);
ipv6_push_frag_opts(skb, &opt.ops, &proto);
}
hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
if (hop_limit == 0) {
if (skb->protocol == htons(ETH_P_IP))
hop_limit = ip_hdr(skb)->ttl;
else if (skb->protocol == htons(ETH_P_IPV6))
hop_limit = ipv6_hdr(skb)->hop_limit;
else
hop_limit = ip6_dst_hoplimit(dst);
}
/* Calculate max headroom for all the headers and adjust
* needed_headroom if necessary.


@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
}
mtu = dst_mtu(dst);
if (!skb->ignore_df && skb->len > mtu) {
if (skb->len > mtu) {
skb_dst_update_pmtu(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {


@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
rt->rt6i_src = ort->fib6_src;
#endif
rt->rt6i_prefsrc = ort->fib6_prefsrc;
rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
}
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,


@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
if (len < IEEE80211_DEAUTH_FRAME_LEN)
return;
ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
mgmt->sa, mgmt->da, mgmt->bssid, reason);
ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
sta_info_destroy_addr(sdata, mgmt->sa);
}
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
ibss_dbg(sdata,
"RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
mgmt->bssid, auth_transaction);
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rx_timestamp = drv_get_tsf(local, sdata);
}
ibss_dbg(sdata,
"RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
mgmt->sa, mgmt->bssid,
(unsigned long long)rx_timestamp,
(unsigned long long)rx_timestamp);
ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
(unsigned long long)beacon_timestamp,
(unsigned long long)(rx_timestamp - beacon_timestamp),
jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
tx_last_beacon = drv_tx_last_beacon(local);
ibss_dbg(sdata,
"RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;


@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
flush_work(&local->radar_detected_work);
rtnl_lock();
list_for_each_entry(sdata, &local->interfaces, list)
list_for_each_entry(sdata, &local->interfaces, list) {
/*
* XXX: there may be more work for other vif types and even
* for station mode: a good thing would be to run most of
* the iface type's dependent _stop (ieee80211_mg_stop,
* ieee80211_ibss_stop) etc...
* For now, fix only the specific bug that was seen: race
* between csa_connection_drop_work and us.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
/*
* This worker is scheduled from the iface worker that
* runs on mac80211's workqueue, so we can't be
* scheduling this worker after the cancel right here.
* The exception is ieee80211_chswitch_done.
* Then we can have a race...
*/
cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
}
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
}
ieee80211_scan_cancel(local);
/* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_RXSTBC_2 |
IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_RXSTBC_MASK |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&local->ifa6_notifier);
#endif
ieee80211_txq_teardown_flows(local);
rtnl_lock();
@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
skb_queue_purge(&local->skb_queue_tdls_chsw);
ieee80211_txq_teardown_flows(local);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
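
The RXSTBC hunk in this file's diff above works because the VHT RXSTBC capability is a 3-bit field rather than a set of independent flag bits: OR-ing the _1.._4 constants happens to equal the field mask, and naming the mask states the intent directly. A minimal user-space sketch, assuming the usual constant values (a 3-bit field at bit 8, mirroring ieee80211.h):

#include <stdio.h>

/* Assumed values mirroring the ieee80211.h RXSTBC field constants. */
#define RXSTBC_1        0x00000100
#define RXSTBC_2        0x00000200
#define RXSTBC_3        0x00000300
#define RXSTBC_4        0x00000400
#define RXSTBC_MASK     0x00000700

int main(void)
{
        unsigned int ored = RXSTBC_1 | RXSTBC_2 | RXSTBC_3 | RXSTBC_4;

        /* both print 0x700: the mask covers every field value */
        printf("ored=0x%x mask=0x%x\n", ored, RXSTBC_MASK);
        return 0;
}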


@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
forward = false;
reply = true;
target_metric = 0;
if (SN_GT(target_sn, ifmsh->sn))
ifmsh->sn = target_sn;
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
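
SN_GT() in the hunk above is a circular comparison, not a plain '>': mesh sequence numbers wrap around, so newer-than must be decided modulo 2^32. A minimal user-space sketch of the idiom, assuming the usual signed-subtraction trick; names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* true if a is newer than b, modulo 2^32 */
static int sn_gt(uint32_t a, uint32_t b)
{
        return (int32_t)(b - a) < 0;
}

int main(void)
{
        /* 1 is newer than 0xffffffff once the counter has wrapped */
        printf("%d %d\n", sn_gt(2, 1), sn_gt(1, 0xffffffffu));
        return 0;
}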


@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
*/
if (sdata->reserved_chanctx) {
struct ieee80211_supported_band *sband = NULL;
struct sta_info *mgd_sta = NULL;
enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (sdata->reserved_ready)
goto out;
if (sdata->vif.bss_conf.chandef.width !=
sdata->csa_chandef.width) {
/*
* For managed interface, we need to also update the AP
* station bandwidth and align the rate scale algorithm
* on the bandwidth change. Here we only consider the
* bandwidth of the new channel definition (as channel
* switch flow does not have the full HT/VHT/HE
* information), assuming that if additional changes are
* required they would be done as part of the processing
* of the next beacon from the AP.
*/
switch (sdata->csa_chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
default:
bw = IEEE80211_STA_RX_BW_20;
break;
case NL80211_CHAN_WIDTH_40:
bw = IEEE80211_STA_RX_BW_40;
break;
case NL80211_CHAN_WIDTH_80:
bw = IEEE80211_STA_RX_BW_80;
break;
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
bw = IEEE80211_STA_RX_BW_160;
break;
}
mgd_sta = sta_info_get(sdata, ifmgd->bssid);
sband =
local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
}
if (sdata->vif.bss_conf.chandef.width >
sdata->csa_chandef.width) {
mgd_sta->sta.bandwidth = bw;
rate_control_rate_update(local, sband, mgd_sta,
IEEE80211_RC_BW_CHANGED);
}
ret = ieee80211_vif_use_reserved_context(sdata);
if (ret) {
sdata_info(sdata,
@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
}
if (sdata->vif.bss_conf.chandef.width <
sdata->csa_chandef.width) {
mgd_sta->sta.bandwidth = bw;
rate_control_rate_update(local, sband, mgd_sta,
IEEE80211_RC_BW_CHANGED);
}
goto out;
}
@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
cbss->beacon_interval));
return;
drop_connection:
/*
* This is just so that the disconnect flow will know that
* we were trying to switch channel and failed. In case the
* mode is 1 (we are not allowed to Tx), we will know not to
* send a deauthentication frame. Those two fields will be
* reset when the disconnection worker runs.
*/
sdata->vif.csa_active = true;
sdata->csa_block_tx = csa_ie.mode;
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
mutex_unlock(&local->mtx);
@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx;
sdata_lock(sdata);
if (!ifmgd->associated) {
@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
return;
}
tx = !sdata->csa_block_tx;
/* AP is probably out of range (or not reachable for another reason) so
* remove the bss struct for that AP.
*/
@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
true, frame_buf);
tx, frame_buf);
mutex_lock(&local->mtx);
sdata->vif.csa_active = false;
ifmgd->csa_waiting_bcn = false;
@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
}
mutex_unlock(&local->mtx);
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
sdata_unlock(sdata);


@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1) &&
(ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_data(hdr->frame_control)) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&


@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
}
static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
struct sk_buff *skb, int headroom,
int *subframe_len)
struct sk_buff *skb, int headroom)
{
int amsdu_len = *subframe_len + sizeof(struct ethhdr);
int padding = (4 - amsdu_len) & 3;
if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
if (skb_headroom(skb) < headroom) {
I802_DEBUG_INC(local->tx_expand_skb_head);
if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
wiphy_debug(local->hw.wiphy,
"failed to reallocate TX buffer\n");
return false;
}
}
if (padding) {
*subframe_len += padding;
skb_put_zero(skb, padding);
}
return true;
}
@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
return true;
if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
&subframe_len))
if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
return false;
data = skb_push(skb, sizeof(*amsdu_hdr));
@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
void *data;
bool ret = false;
unsigned int orig_len;
int n = 1, nfrags;
int n = 2, nfrags, pad = 0;
u16 hdrlen;
if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
return false;
@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (skb->len + head->len > max_amsdu_len)
goto out;
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
nfrags = 1 + skb_shinfo(skb)->nr_frags;
nfrags += 1 + skb_shinfo(head)->nr_frags;
frag_tail = &skb_shinfo(head)->frag_list;
@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (max_frags && nfrags > max_frags)
goto out;
if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
&subframe_len))
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
/*
* Pad out the previous subframe to a multiple of 4 by adding the
* padding to the next one, that's being added. Note that head->len
* is the length of the full A-MSDU, but that works since each time
* we add a new subframe we pad out the previous one to a multiple
* of 4 and thus it no longer matters in the next round.
*/
hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
if ((head->len - hdrlen) & 3)
pad = 4 - ((head->len - hdrlen) & 3);
if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
2 + pad))
goto out_recalc;
ret = true;
data = skb_push(skb, ETH_ALEN + 2);
memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
memcpy(data, &len, 2);
memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
memset(skb_push(skb, pad), 0, pad);
head->len += skb->len;
head->data_len += skb->len;
*frag_tail = skb;
flow->backlog += head->len - orig_len;
tin->backlog_bytes += head->len - orig_len;
fq_recalc_backlog(fq, tin, flow);
out_recalc:
if (head->len != orig_len) {
flow->backlog += head->len - orig_len;
tin->backlog_bytes += head->len - orig_len;
fq_recalc_backlog(fq, tin, flow);
}
out:
spin_unlock_bh(&fq->lock);
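
The padding comment in the hunk above compresses some arithmetic: every A-MSDU subframe must start 4-byte aligned, and the pad owed by the previous subframe is prepended to the one being added. A standalone sketch of just that calculation (head_len/hdrlen stand in for the kernel's fields):

#include <stdio.h>

/* pad needed so the next subframe starts on a 4-byte boundary,
 * mirroring the (head->len - hdrlen) & 3 logic above */
static unsigned int amsdu_pad(unsigned int head_len, unsigned int hdrlen)
{
        unsigned int rem = (head_len - hdrlen) & 3;

        return rem ? 4 - rem : 0;
}

int main(void)
{
        /* 26-byte 802.11 header plus 61 bytes of subframes -> pad 3 */
        printf("pad = %u\n", amsdu_pad(26 + 61, 26));
        return 0;
}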


@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_reg_rule *rrule;
struct ieee80211_wmm_ac *wmm_ac;
const struct ieee80211_wmm_ac *wmm_ac;
u16 center_freq = 0;
if (sdata->vif.type != NL80211_IFTYPE_AP &&
@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
rcu_read_unlock();
return;
}
if (sdata->vif.type == NL80211_IFTYPE_AP)
wmm_ac = &rrule->wmm_rule->ap[ac];
wmm_ac = &rrule->wmm_rule.ap[ac];
else
wmm_ac = &rrule->wmm_rule->client[ac];
wmm_ac = &rrule->wmm_rule.client[ac];
qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
min_t(u16, qparam->txop, wmm_ac->cot / 32);
qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
rcu_read_unlock();
}


@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
.close = packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
if (likely(pg_vec[i].buffer)) {
kvfree(pg_vec[i].buffer);
if (is_vmalloc_addr(pg_vec[i].buffer))
vfree(pg_vec[i].buffer);
else
free_pages((unsigned long)pg_vec[i].buffer,
order);
pg_vec[i].buffer = NULL;
}
}
kfree(pg_vec);
}
static char *alloc_one_pg_vec_page(unsigned long size)
static char *alloc_one_pg_vec_page(unsigned long order)
{
char *buffer;
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
__GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
buffer = kvzalloc(size, GFP_KERNEL);
buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
/* __get_free_pages failed, fall back to vmalloc */
buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
if (buffer)
return buffer;
return buffer;
/* vmalloc failed, lets dig into swap here */
gfp_flags &= ~__GFP_NORETRY;
buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
/* complete and utter failure */
return NULL;
}
static struct pgv *alloc_pg_vec(struct tpacket_req *req)
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
unsigned int block_nr = req->tp_block_nr;
unsigned long size = req->tp_block_size;
struct pgv *pg_vec;
int i;
@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
goto out;
for (i = 0; i < block_nr; i++) {
pg_vec[i].buffer = alloc_one_pg_vec_page(size);
pg_vec[i].buffer = alloc_one_pg_vec_page(order);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}
@ -4184,7 +4200,7 @@ out:
return pg_vec;
out_free_pgvec:
free_pg_vec(pg_vec, block_nr);
free_pg_vec(pg_vec, order, block_nr);
pg_vec = NULL;
goto out;
}
@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
int was_running;
__be16 num;
int err = -EINVAL;
/* Added to avoid minimal code churn */
@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
err = -ENOMEM;
pg_vec = alloc_pg_vec(req);
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
if (pg_vec)
free_pg_vec(pg_vec, req->tp_block_nr);
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
return err;
}
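
The revert above goes back to sizing each ring block as a power-of-two page order and only then falling back to vmalloc (and finally to retrying page allocation with swap allowed), because mmap() of the ring needs page-backed memory that kvzalloc()'s kmalloc path does not guarantee. A user-space sketch of the size-to-order conversion, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

/* user-space re-implementation mirroring the kernel's get_order() */
static int get_order(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        /* a 64 KiB tp_block_size needs order-4 pages (16 x 4 KiB) */
        printf("order(%lu) = %d\n", 65536UL, get_order(65536UL));
        return 0;
}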


@ -64,6 +64,7 @@ struct packet_ring_buffer {
unsigned int frame_size;
unsigned int frame_max;
unsigned int pg_vec_order;
unsigned int pg_vec_pages;
unsigned int pg_vec_len;


@ -1,6 +1,6 @@
config RDS
tristate "The RDS Protocol"
tristate "The Reliable Datagram Sockets Protocol"
depends on INET
---help---
The RDS (Reliable Datagram Sockets) protocol provides reliable,


@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
rdma_addr_get_sgid(dev_addr,
(union ib_gid *)&iinfo6->src_gid);
rdma_addr_get_dgid(dev_addr,
(union ib_gid *)&iinfo6->dst_gid);
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
(union ib_gid *)&iinfo6->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo6->max_send_wr = ic->i_send_ring.w_nr;
iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;


@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/rfkill.h>
#include <linux/platform_device.h>
#include <linux/clk.h>


@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
return ret;
}
static int tcf_action_destroy_1(struct tc_action *a, int bind)
{
struct tc_action *actions[] = { a, NULL };
return tcf_action_destroy(actions, bind);
}
static int tcf_action_put(struct tc_action *p)
{
return __tcf_action_put(p, false);
@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
err = tcf_action_goto_chain_init(a, tp);
if (err) {
struct tc_action *actions[] = { a, NULL };
tcf_action_destroy(actions, bind);
tcf_action_destroy_1(a, bind);
NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
return ERR_PTR(err);
}
}
if (!tcf_action_valid(a->tcfa_action)) {
NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
a->tcfa_action = TC_ACT_UNSPEC;
tcf_action_destroy_1(a, bind);
NL_SET_ERR_MSG(extack, "Invalid control action value");
return ERR_PTR(-EINVAL);
}
return a;
@ -1173,6 +1179,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
struct tcf_idrinfo *idrinfo = a->idrinfo;
u32 act_index = a->tcfa_index;
actions[i] = NULL;
if (tcf_action_put(a)) {
/* last reference, action was deleted concurrently */
module_put(ops->owner);
@ -1184,7 +1191,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
if (ret < 0)
return ret;
}
actions[i] = NULL;
}
return 0;
}


@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
return ret;
}
static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
struct tcf_ife_info *ife, u32 metaid,
bool exists)
{
int ret;
if (!try_module_get(ops->owner))
return -ENOENT;
ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
if (ret)
module_put(ops->owner);
return ret;
}
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
int len, bool exists)
{
@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists);
rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
if (rc == 0)
installed += 1;
}
@ -400,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
struct tcf_meta_info *e, *n;
list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
module_put(e->ops->owner);
list_del(&e->metalist);
if (e->metaval) {
if (e->ops->release)
@ -408,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
else
kfree(e->metaval);
}
module_put(e->ops->owner);
kfree(e);
}
}
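
add_metainfo_and_get_ops() above pins the metadata module before using its ops and drops the reference if the add fails, closing the window where the module could unload mid-use. A user-space sketch of the same acquire/rollback shape (the refcount helpers stand in for try_module_get/module_put):

#include <stdio.h>

struct ops { int refs; };

static int get(struct ops *o) { o->refs++; return 1; }
static void put(struct ops *o) { o->refs--; }

/* acquire the reference first; roll it back if the real work fails */
static int add_metainfo_and_get_ops(struct ops *o, int fail)
{
        if (!get(o))
                return -1;
        if (fail) {
                put(o);         /* rollback on error */
                return -1;
        }
        return 0;               /* reference kept until cleanup */
}

int main(void)
{
        struct ops o = { 0 };

        add_metainfo_and_get_ops(&o, 0);
        printf("refs after success: %d\n", o.refs);  /* 1, dropped later */
        put(&o);
        return 0;
}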


@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
{
struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
if (!keys_start)
goto nla_failure;
for (; n > 0; n--) {
struct nlattr *key_start;
key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
if (!key_start)
goto nla_failure;
if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
nlmsg_trim(skb, keys_start);
return -EINVAL;
}
nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
goto nla_failure;
nla_nest_end(skb, key_start);
@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
nla_nest_end(skb, keys_start);
return 0;
nla_failure:
nla_nest_cancel(skb, keys_start);
return -EINVAL;
}
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
if (p->tcfp_keys_ex) {
tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
if (tcf_pedit_key_ex_dump(skb,
p->tcfp_keys_ex,
p->tcfp_nkeys))
goto nla_put_failure;
if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
goto nla_put_failure;


@ -1252,7 +1252,7 @@ replay:
}
chain = tcf_chain_get(block, chain_index, true);
if (!chain) {
NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
err = -ENOMEM;
goto errout;
}
@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
err = -EINVAL;
err = -ENOENT;
goto errout;
}


@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
struct sctp_ht_iter {
struct seq_net_private p;
struct rhashtable_iter hti;
int start_fail;
};
static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
sctp_transport_walk_start(&iter->hti);
iter->start_fail = 0;
return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
}
@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
{
struct sctp_ht_iter *iter = seq->private;
if (iter->start_fail)
return;
sctp_transport_walk_stop(&iter->hti);
}
@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
}
transport = (struct sctp_transport *)v;
if (!sctp_transport_hold(transport))
return 0;
assoc = transport->asoc;
epb = &assoc->base;
sk = epb->sk;
@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
}
transport = (struct sctp_transport *)v;
if (!sctp_transport_hold(transport))
return 0;
assoc = transport->asoc;
list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,


@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
}
if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
trans->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
} else if (asoc) {
list_for_each_entry(trans,
&asoc->peer.transport_addr_list,
transports) {
if (trans->ipaddr.sa.sa_family != AF_INET6)
continue;
if (trans) {
if (trans->ipaddr.sa.sa_family == AF_INET6) {
trans->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
}
} else if (asoc) {
struct sctp_transport *t;
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->ipaddr.sa.sa_family != AF_INET6)
continue;
t->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
}
asoc->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
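
For reference on the hunk above: the transport/asoc flowlabel fields pack a 20-bit IPv6 flow label value together with a "was explicitly set" flag bit. A user-space sketch of that encoding, with mask values assumed to mirror SCTP_FLOWLABEL_VAL_MASK/SCTP_FLOWLABEL_SET_MASK:

#include <stdint.h>
#include <stdio.h>

#define FLOWLABEL_VAL_MASK 0xfffffU   /* low 20 bits: the flow label */
#define FLOWLABEL_SET_MASK 0x100000U  /* bit 20: value was set */

int main(void)
{
        uint32_t spp_ipv6_flowlabel = 0x12345;
        uint32_t stored = (spp_ipv6_flowlabel & FLOWLABEL_VAL_MASK) |
                          FLOWLABEL_SET_MASK;

        printf("stored=0x%x set=%d value=0x%x\n", stored,
               !!(stored & FLOWLABEL_SET_MASK),
               stored & FLOWLABEL_VAL_MASK);
        return 0;
}
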
@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
trans->dscp |= SCTP_DSCP_SET_MASK;
} else if (asoc) {
list_for_each_entry(trans,
&asoc->peer.transport_addr_list,
struct sctp_transport *t;
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
trans->dscp = params->spp_dscp &
SCTP_DSCP_VAL_MASK;
trans->dscp |= SCTP_DSCP_SET_MASK;
t->dscp = params->spp_dscp &
SCTP_DSCP_VAL_MASK;
t->dscp |= SCTP_DSCP_SET_MASK;
}
asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
asoc->dscp |= SCTP_DSCP_SET_MASK;
@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
break;
}
if (!sctp_transport_hold(t))
continue;
if (net_eq(sock_net(t->asoc->base.sk), net) &&
t->asoc->peer.primary_path == t)
break;
sctp_transport_put(t);
}
return t;
@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter,
int pos)
{
void *obj = SEQ_START_TOKEN;
struct sctp_transport *t;
while (pos && (obj = sctp_transport_get_next(net, iter)) &&
!IS_ERR(obj))
pos--;
if (!pos)
return SEQ_START_TOKEN;
return obj;
while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
if (!--pos)
break;
sctp_transport_put(t);
}
return t;
}
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@ -5082,8 +5096,6 @@ again:
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
if (!sctp_transport_hold(tsp))
continue;
ret = cb(tsp, p);
if (ret)
break;


@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
* struct tipc_bc_base - base structure for keeping broadcast send state
* @link: broadcast send link structure
* @inputq: data input queue; will only carry SOCK_WAKEUP messages
* @dest: array keeping number of reachable destinations per bearer
* @dests: array keeping number of reachable destinations per bearer
* @primary_bearer: a bearer having links to all broadcast destinations, if any
* @bcast_support: indicates if primary bearer, if any, supports broadcast
* @rcast_support: indicates if all peer nodes support replicast
* @rc_ratio: dest count as percentage of cluster size where send method changes
* @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
* @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
*/
struct tipc_bc_base {
struct tipc_link *link;


@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = tipc_dump_start,
.dump = tipc_diag_dump,
.done = tipc_dump_done,
};
netlink_dump_start(net->diag_nlsk, skb, h, &c);
return 0;


@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
u64 value = (u64)node << 32 | port;
struct tipc_dest *dst;
list_for_each_entry(dst, l, list) {
if (dst->value != value)
continue;
return dst;
if (dst->node == node && dst->port == port)
return dst;
}
return NULL;
}
bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
u64 value = (u64)node << 32 | port;
struct tipc_dest *dst;
if (tipc_dest_find(l, node, port))
@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
if (unlikely(!dst))
return false;
dst->value = value;
dst->node = node;
dst->port = port;
list_add(&dst->list, l);
return true;
}


@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
struct tipc_dest {
struct list_head list;
union {
struct {
u32 port;
u32 node;
};
u64 value;
};
u32 port;
u32 node;
};
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
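
The union dropped above is the endianness trap this series fixes: overlaying {port, node} with a single u64 only agrees with (u64)node << 32 | port on little-endian hosts. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

union dest {
        struct {
                uint32_t port;   /* offset 0 */
                uint32_t node;   /* offset 4 */
        } s;
        uint64_t value;
};

int main(void)
{
        union dest d;

        d.value = ((uint64_t)0xAABBCCDDu << 32) | 0x11223344u; /* node|port */
        /* little-endian: port=0x11223344; big-endian: port=0xaabbccdd */
        printf("port=0x%x node=0x%x\n", d.s.port, d.s.node);
        return 0;
}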


@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
},
{
.cmd = TIPC_NL_SOCK_GET,
.start = tipc_dump_start,
.dumpit = tipc_nl_sk_dump,
.done = tipc_dump_done,
.policy = tipc_nl_policy,
},
{


@ -2672,6 +2672,8 @@ void tipc_sk_reinit(struct net *net)
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@ -3227,45 +3229,69 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
struct net *net = sock_net(skb->sk);
struct tipc_net *tn = tipc_net(net);
const struct bucket_table *tbl;
u32 prev_portid = cb->args[1];
u32 tbl_id = cb->args[0];
struct rhash_head *pos;
struct rhashtable_iter *iter = (void *)cb->args[0];
struct tipc_sock *tsk;
int err;
rcu_read_lock();
tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
for (; tbl_id < tbl->size; tbl_id++) {
rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
spin_lock_bh(&tsk->sk.sk_lock.slock);
if (prev_portid && prev_portid != tsk->portid) {
spin_unlock_bh(&tsk->sk.sk_lock.slock);
rhashtable_walk_start(iter);
while ((tsk = rhashtable_walk_next(iter)) != NULL) {
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
if (err == -EAGAIN) {
err = 0;
continue;
}
err = skb_handler(skb, cb, tsk);
if (err) {
prev_portid = tsk->portid;
spin_unlock_bh(&tsk->sk.sk_lock.slock);
goto out;
}
prev_portid = 0;
spin_unlock_bh(&tsk->sk.sk_lock.slock);
break;
}
}
out:
rcu_read_unlock();
cb->args[0] = tbl_id;
cb->args[1] = prev_portid;
sock_hold(&tsk->sk);
rhashtable_walk_stop(iter);
lock_sock(&tsk->sk);
err = skb_handler(skb, cb, tsk);
if (err) {
release_sock(&tsk->sk);
sock_put(&tsk->sk);
goto out;
}
release_sock(&tsk->sk);
rhashtable_walk_start(iter);
sock_put(&tsk->sk);
}
rhashtable_walk_stop(iter);
out:
return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);
int tipc_dump_start(struct netlink_callback *cb)
{
struct rhashtable_iter *iter = (void *)cb->args[0];
struct net *net = sock_net(cb->skb->sk);
struct tipc_net *tn = tipc_net(net);
if (!iter) {
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
cb->args[0] = (long)iter;
}
rhashtable_walk_enter(&tn->sk_rht, iter);
return 0;
}
EXPORT_SYMBOL(tipc_dump_start);
int tipc_dump_done(struct netlink_callback *cb)
{
struct rhashtable_iter *hti = (void *)cb->args[0];
rhashtable_walk_exit(hti);
kfree(hti);
return 0;
}
EXPORT_SYMBOL(tipc_dump_done);
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
struct tipc_sock *tsk, u32 sk_filter_state,
u64 (*tipc_diag_gen_cookie)(struct sock *sk))
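
tipc_dump_start()/tipc_dump_done() above follow a resumable-dump pattern: allocate the walk iterator on the first pass, park the pointer in the callback's scratch slot between invocations, and free it when the dump finishes, so rhashtable_walk_exit() is never missed. A minimal user-space sketch of the same lifecycle (struct cb stands in for netlink_callback; the pointer is parked in a long slot, as the kernel does with cb->args):

#include <stdlib.h>

struct cb { long args[1]; };
struct walk_iter { int pos; };

static int dump_start(struct cb *cb)
{
        struct walk_iter *iter = (void *)cb->args[0];

        if (!iter) {                    /* first invocation only */
                iter = calloc(1, sizeof(*iter));
                if (!iter)
                        return -1;
                cb->args[0] = (long)iter;
        }
        return 0;
}

static void dump_done(struct cb *cb)
{
        free((void *)cb->args[0]);      /* always paired with start */
}

int main(void)
{
        struct cb cb = { { 0 } };

        if (dump_start(&cb) == 0) {
                ((struct walk_iter *)cb.args[0])->pos = 42; /* resumable */
                dump_done(&cb);
        }
        return 0;
}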


@ -68,4 +68,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int (*skb_handler)(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk));
int tipc_dump_start(struct netlink_callback *cb);
int tipc_dump_done(struct netlink_callback *cb);
#endif


@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
conn_put(con);
}
/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
* The queued work is launched into tipc_send_work()->tipc_send_to_sock()
/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
* The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
*/
void tipc_topsrv_queue_evt(struct net *net, int conid,
u32 event, struct tipc_event *evt)


@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
goto nla_put_failure;
if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
rule->wmm_rule->client[j].cw_min) ||
rule->wmm_rule.client[j].cw_min) ||
nla_put_u16(msg, NL80211_WMMR_CW_MAX,
rule->wmm_rule->client[j].cw_max) ||
rule->wmm_rule.client[j].cw_max) ||
nla_put_u8(msg, NL80211_WMMR_AIFSN,
rule->wmm_rule->client[j].aifsn) ||
nla_put_u8(msg, NL80211_WMMR_TXOP,
rule->wmm_rule->client[j].cot))
rule->wmm_rule.client[j].aifsn) ||
nla_put_u16(msg, NL80211_WMMR_TXOP,
rule->wmm_rule.client[j].cot))
goto nla_put_failure;
nla_nest_end(msg, nl_wmm_rule);
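
The nla_put_u8() -> nla_put_u16() change above matters because TXOP is exported in 32-microsecond units while cot is kept in microseconds (note the 1000 * be16_to_cpu() conversion in the regulatory code further down), so realistic values overflow a u8. For example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t cot_us = 10000;            /* 10 ms occupancy time */
        uint16_t txop = cot_us / 32;        /* 312 units of 32 us */

        printf("txop=%u (a u8 would wrap to %u)\n",
               (unsigned)txop, (unsigned)(uint8_t)txop);
        return 0;
}
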
@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if (large) {
const struct ieee80211_reg_rule *rule =
freq_reg_info(wiphy, chan->center_freq);
freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
if (!IS_ERR(rule) && rule->wmm_rule) {
if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
if (nl80211_msg_put_wmm_rules(msg, rule))
goto nla_put_failure;
}
@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_MDID] ||
!info->attrs[NL80211_ATTR_IE] ||
!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;


@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
reg_copy_regd(const struct ieee80211_regdomain *src_regd)
{
struct ieee80211_regdomain *regd;
int size_of_regd, size_of_wmms;
int size_of_regd;
unsigned int i;
struct ieee80211_wmm_rule *d_wmm, *s_wmm;
size_of_regd =
sizeof(struct ieee80211_regdomain) +
src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
size_of_wmms = src_regd->n_wmm_rules *
sizeof(struct ieee80211_wmm_rule);
regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
regd = kzalloc(size_of_regd, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
memcpy(d_wmm, s_wmm, size_of_wmms);
for (i = 0; i < src_regd->n_reg_rules; i++) {
for (i = 0; i < src_regd->n_reg_rules; i++)
memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
if (!src_regd->reg_rules[i].wmm_rule)
continue;
regd->reg_rules[i].wmm_rule = d_wmm +
(src_regd->reg_rules[i].wmm_rule - s_wmm) /
sizeof(struct ieee80211_wmm_rule);
}
return regd;
}
@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
return true;
}
static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
struct fwdb_wmm_rule *wmm)
{
struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
unsigned int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
rule->ap[i].aifsn = wmm->ap[i].aifsn;
rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
}
rrule->has_wmm = true;
}
static int __regdb_query_wmm(const struct fwdb_header *db,
const struct fwdb_country *country, int freq,
u32 *dbptr, struct ieee80211_wmm_rule *rule)
struct ieee80211_reg_rule *rule)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
wmm = (void *)((u8 *)db + wmm_ptr);
set_wmm_rule(rule, wmm);
if (dbptr)
*dbptr = wmm_ptr;
return 0;
}
}
@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
return -ENODATA;
}
int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
struct ieee80211_wmm_rule *rule)
int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
{
const struct fwdb_header *hdr = regdb;
const struct fwdb_country *country;
@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
country = &hdr->country[0];
while (country->coll_ptr) {
if (alpha2_equal(alpha2, country->alpha2))
return __regdb_query_wmm(regdb, country, freq, dbptr,
rule);
return __regdb_query_wmm(regdb, country, freq, rule);
country++;
}
@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
}
EXPORT_SYMBOL(reg_query_regdb_wmm);
struct wmm_ptrs {
struct ieee80211_wmm_rule *rule;
u32 ptr;
};
static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
u32 wmm_ptr, int n_wmms)
{
int i;
for (i = 0; i < n_wmms; i++) {
if (wmm_ptrs[i].ptr == wmm_ptr)
return wmm_ptrs[i].rule;
}
return NULL;
}
static int regdb_query_country(const struct fwdb_header *db,
const struct fwdb_country *country)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
struct ieee80211_regdomain *regdom;
struct ieee80211_regdomain *tmp_rd;
unsigned int size_of_regd, i, n_wmms = 0;
struct wmm_ptrs *wmm_ptrs;
unsigned int size_of_regd, i;
size_of_regd = sizeof(struct ieee80211_regdomain) +
coll->n_rules * sizeof(struct ieee80211_reg_rule);
@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
if (!regdom)
return -ENOMEM;
wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
if (!wmm_ptrs) {
kfree(regdom);
return -ENOMEM;
}
regdom->n_reg_rules = coll->n_rules;
regdom->alpha2[0] = country->alpha2[0];
regdom->alpha2[1] = country->alpha2[1];
@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
1000 * be16_to_cpu(rule->cac_timeout);
if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
struct ieee80211_wmm_rule *wmm_pos =
find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
struct fwdb_wmm_rule *wmm;
struct ieee80211_wmm_rule *wmm_rule;
struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
if (wmm_pos) {
rrule->wmm_rule = wmm_pos;
continue;
}
wmm = (void *)((u8 *)db + wmm_ptr);
tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
sizeof(struct ieee80211_wmm_rule),
GFP_KERNEL);
if (!tmp_rd) {
kfree(regdom);
kfree(wmm_ptrs);
return -ENOMEM;
}
regdom = tmp_rd;
wmm_rule = (struct ieee80211_wmm_rule *)
((u8 *)regdom + size_of_regd + n_wmms *
sizeof(struct ieee80211_wmm_rule));
set_wmm_rule(wmm_rule, wmm);
wmm_ptrs[n_wmms].ptr = wmm_ptr;
wmm_ptrs[n_wmms++].rule = wmm_rule;
set_wmm_rule(rrule, wmm);
}
}
kfree(wmm_ptrs);
return reg_schedule_apply(regdom);
}
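
The reg.c rework above replaces per-rule wmm_rule pointers into one krealloc()ed side buffer with a wmm_rule embedded in each reg_rule. The pointer scheme was fragile because realloc may move the buffer, leaving previously stored pointers stale; a generic user-space illustration of that hazard:

#include <stdio.h>
#include <stdlib.h>

struct rule { int *wmm; };      /* old style: pointer into a shared buffer */

int main(void)
{
        int *buf = malloc(4 * sizeof(int));
        struct rule r;

        if (!buf)
                return 1;
        r.wmm = &buf[0];
        printf("rule points at %p\n", (void *)r.wmm);

        buf = realloc(buf, 1 << 20);    /* growing may move the buffer */
        if (!buf)
                return 1;
        printf("buffer now at  %p\n", (void *)buf); /* r.wmm may be stale */
        free(buf);
        return 0;
}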


@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
u8 vht_opclass;
u16 freq = chandef->center_freq1;
u32 freq = chandef->center_freq1;
if (freq >= 2412 && freq <= 2472) {
if (chandef->width > NL80211_CHAN_WIDTH_40)
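
The u16 -> u32 change above avoids silent truncation: 60 GHz (DMG) channel center frequencies exceed 65535 MHz. For example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t center_freq1 = 66960;       /* 60 GHz channel 5, in MHz */
        uint16_t truncated = (uint16_t)center_freq1;

        printf("u32: %u MHz, u16: %u MHz\n",
               center_freq1, (unsigned)truncated);
        return 0;
}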


@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_DEVMAP] = "devmap",
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
[BPF_MAP_TYPE_CPUMAP] = "cpumap",
[BPF_MAP_TYPE_XSKMAP] = "xskmap",
[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
};


@ -46,6 +46,9 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
# Some systems don't have a ping6 binary anymore
which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
tests="
pmtu_vti6_exception vti6: PMTU exceptions
pmtu_vti4_exception vti4: PMTU exceptions
@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
# Check that exception was created
if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
fail=0
min=68
max=$((65528 - 20))
max=$((65535 - 20))
# Check invalid values first
for v in $((min - 1)) $((max + 1)); do
${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null


@ -312,6 +312,54 @@
"$TC actions flush action police"
]
},
{
"id": "6aaf",
"name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
"category": [
"actions",
"police"
],
"setup": [
[
"$TC actions flush action police",
0,
1,
255
]
],
"cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action police index 1",
"matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
"matchCount": "1",
"teardown": [
"$TC actions flush action police"
]
},
{
"id": "29b1",
"name": "Add police actions with conform-exceed control <invalid>/drop",
"category": [
"actions",
"police"
],
"setup": [
[
"$TC actions flush action police",
0,
1,
255
]
],
"cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions ls action police",
"matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
"matchCount": "0",
"teardown": [
"$TC actions flush action police"
]
},
{
"id": "c26f",
"name": "Add police action with invalid peakrate value",