diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt index 3a65aabc76a2..6262c2f293b0 100644 --- a/Documentation/devicetree/bindings/net/keystone-netcp.txt +++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt @@ -139,9 +139,9 @@ Optional properties: sub-module attached to this interface. The MAC address will be determined using the optional properties defined in -ethernet.txt, as provided by the of_get_mac_address API and only if efuse-mac -is set to 0. If any of the optional MAC address properties are not present, -then the driver will use random MAC address. +ethernet.txt and only if efuse-mac is set to 0. If all of the optional MAC +address properties are not present, then the driver will use a random MAC +address. Example binding: diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt index 74665502f4cf..7e675dafc256 100644 --- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt +++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt @@ -16,8 +16,8 @@ Optional properties: - ieee80211-freq-limit: See ieee80211.txt - mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data -The driver is using of_get_mac_address API, so the MAC address can be as well -be set with corresponding optional properties defined in net/ethernet.txt. +The MAC address can as well be set with corresponding optional properties +defined in net/ethernet.txt. Optional nodes: - led: Properties for a connected LED diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index c92dcac85231..026619c9a8cb 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -106,7 +107,7 @@ static int __init tsi108_eth_of_init(void) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) - memcpy(tsi_eth_data.mac_addr, mac_addr, 6); + ether_addr_copy(tsi_eth_data.mac_addr, mac_addr); ph = of_get_property(np, "mdio-handle", NULL); mdio = of_find_node_by_phandle(*ph); diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index da1fc17295d9..b996967af8d9 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1098,13 +1098,6 @@ static int bond_option_arp_validate_set(struct bonding *bond, { netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n", newval->string, newval->value); - - if (bond->dev->flags & IFF_UP) { - if (!newval->value) - bond->recv_probe = NULL; - else if (bond->params.arp_interval) - bond->recv_probe = bond_arp_rcv; - } bond->params.arp_validate = newval->value; return 0; diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 37ebd890ef51..9e06dff619c3 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -871,7 +871,7 @@ static int emac_probe(struct platform_device *pdev) /* Read MAC-address from DT */ mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, mac_addr); /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(ndev->dev_addr)) { diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 7f89ad5c336d..13a1d99b29c6 100644 
--- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -961,7 +961,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) mac_addr = of_get_mac_address(dev->of_node); if (!IS_ERR(mac_addr)) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, mac_addr); else eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 15b1130aa4ae..0e5de88fd6e8 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1504,7 +1504,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) mac = of_get_mac_address(pdev->dev.of_node); if (!IS_ERR(mac)) - memcpy(netdev->dev_addr, mac, ETH_ALEN); + ether_addr_copy(netdev->dev_addr, mac); else eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 953ee5616801..5e1aff9a5fd6 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1413,7 +1413,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) - memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr)); + ether_addr_copy(pdata->dev_addr, mac_addr); return pdata; } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 7b7e526869a7..30cdb246d020 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -903,7 +903,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) */ mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) { - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, mac_addr); } else { struct mpc52xx_fec __iomem *fec = priv->fec; diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 9cd2c28d17df..7ab8095db192 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -729,7 +729,7 @@ static int mac_probe(struct platform_device *_of_dev) err = -EINVAL; goto _return_of_get_parent; } - memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); + ether_addr_copy(mac_dev->addr, mac_addr); /* Get the port handles */ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 90ea7a115d0f..5fad73b2e123 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1015,7 +1015,7 @@ static int fs_enet_probe(struct platform_device *ofdev) mac_addr = of_get_mac_address(ofdev->dev.of_node); if (!IS_ERR(mac_addr)) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, mac_addr); ret = fep->ops->allocate_bd(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index df13c693b038..e670cd293dba 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -873,7 +873,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(dev->dev_addr, mac_addr); if (model && !strcasecmp(model, "TSEC")) 
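The conversions above, and the similar ones in the hunks that follow, all reduce to the same probe-time pattern now that of_get_mac_address() reports failure with an ERR_PTR() and ether_addr_copy() replaces the open-coded memcpy(..., ETH_ALEN). A minimal sketch of that pattern with a hypothetical helper name; drivers that can read the MAC from hardware do so instead of falling back to a random address:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Copy the DT-provided MAC address when the lookup succeeds, otherwise
 * fall back to a random, locally administered one.
 */
static void example_set_dt_mac(struct net_device *ndev, struct device_node *np)
{
        const void *mac = of_get_mac_address(np);

        if (!IS_ERR(mac))
                ether_addr_copy(ndev->dev_addr, mac);
        else
                eth_hw_addr_random(ndev);
}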
priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 216e99af2b5a..4d6892d2f0a4 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3911,7 +3911,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(dev->dev_addr, mac_addr); ugeth->ug_info = ug_info; ugeth->dev = device; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b398d6c94dbd..3dcd9c3d8781 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -118,7 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); static int ibmvnic_init(struct ibmvnic_adapter *); static int ibmvnic_reset_init(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *); -static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); +static int __ibmvnic_set_mac(struct net_device *, u8 *); static int init_crq_queue(struct ibmvnic_adapter *adapter); static int send_query_phys_parms(struct ibmvnic_adapter *adapter); @@ -849,11 +849,7 @@ static int ibmvnic_login(struct net_device *netdev) } } while (retry); - /* handle pending MAC address changes after successful login */ - if (adapter->mac_change_pending) { - __ibmvnic_set_mac(netdev, &adapter->desired.mac); - adapter->mac_change_pending = false; - } + __ibmvnic_set_mac(netdev, adapter->mac_addr); return 0; } @@ -1115,7 +1111,6 @@ static int ibmvnic_open(struct net_device *netdev) } rc = __ibmvnic_open(netdev); - netif_carrier_on(netdev); return rc; } @@ -1686,28 +1681,40 @@ static void ibmvnic_set_multi(struct net_device *netdev) } } -static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) +static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; union ibmvnic_crq crq; int rc; - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; + if (!is_valid_ether_addr(dev_addr)) { + rc = -EADDRNOTAVAIL; + goto err; + } memset(&crq, 0, sizeof(crq)); crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; - ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); + ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); init_completion(&adapter->fw_done); rc = ibmvnic_send_crq(adapter, &crq); - if (rc) - return rc; + if (rc) { + rc = -EIO; + goto err; + } + wait_for_completion(&adapter->fw_done); /* netdev->dev_addr is changed in handle_change_mac_rsp function */ - return adapter->fw_done_rc ? 
-EIO : 0; + if (adapter->fw_done_rc) { + rc = -EIO; + goto err; + } + + return 0; +err: + ether_addr_copy(adapter->mac_addr, netdev->dev_addr); + return rc; } static int ibmvnic_set_mac(struct net_device *netdev, void *p) @@ -1716,13 +1723,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) struct sockaddr *addr = p; int rc; - if (adapter->state == VNIC_PROBED) { - memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); - adapter->mac_change_pending = true; - return 0; - } - - rc = __ibmvnic_set_mac(netdev, addr); + rc = 0; + ether_addr_copy(adapter->mac_addr, addr->sa_data); + if (adapter->state != VNIC_PROBED) + rc = __ibmvnic_set_mac(netdev, addr->sa_data); return rc; } @@ -1859,8 +1863,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); - netif_carrier_on(netdev); - return 0; } @@ -1930,8 +1932,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, return 0; } - netif_carrier_on(netdev); - return 0; } @@ -3937,8 +3937,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); goto out; } - memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], - ETH_ALEN); + ether_addr_copy(netdev->dev_addr, + &crq->change_mac_addr_rsp.mac_addr[0]); out: complete(&adapter->fw_done); return rc; @@ -4475,6 +4475,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, crq->link_state_indication.phys_link_state; adapter->logical_link_state = crq->link_state_indication.logical_link_state; + if (adapter->phys_link_state && adapter->logical_link_state) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); break; case CHANGE_MAC_ADDR_RSP: netdev_dbg(netdev, "Got MAC address change Response\n"); @@ -4852,8 +4856,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_completion(&adapter->init_done); adapter->resetting = false; - adapter->mac_change_pending = false; - do { rc = init_crq_queue(adapter); if (rc) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index cffdac372a33..dcf2eb6d9290 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -969,7 +969,6 @@ struct ibmvnic_tunables { u64 rx_entries; u64 tx_entries; u64 mtu; - struct sockaddr mac; }; struct ibmvnic_adapter { @@ -1091,7 +1090,6 @@ struct ibmvnic_adapter { bool resetting; bool napi_enabled, from_passive_init; - bool mac_change_pending; bool failover_pending; bool force_reset_recovery; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 07e254fc96ef..409b69fd4374 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2750,7 +2750,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, mac_addr = of_get_mac_address(pnp); if (!IS_ERR(mac_addr)) - memcpy(ppd.mac_addr, mac_addr, ETH_ALEN); + ether_addr_copy(ppd.mac_addr, mac_addr); mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 8186135883ed..e758650b2c26 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4565,7 +4565,7 @@ static int mvneta_probe(struct platform_device *pdev) dt_mac_addr = of_get_mac_address(dn); if 
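The ibmvnic changes above drop the unconditional netif_carrier_on() calls at open and reset time and instead drive the carrier from the link-state indication CRQ. A hedged sketch of that logic on its own, with the two link-state flags passed in as booleans and a hypothetical function name:

#include <linux/netdevice.h>

/* Both the physical and the logical link must be up before the stack may
 * transmit; otherwise report the carrier as down.
 */
static void example_link_event(struct net_device *netdev,
                               bool phys_link_up, bool logical_link_up)
{
        if (phys_link_up && logical_link_up)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}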
(!IS_ERR(dt_mac_addr)) { mac_from = "device tree"; - memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); + ether_addr_copy(dev->dev_addr, dt_mac_addr); } else { mvneta_get_mac_addr(pp, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 56d43d9b43ef..d38952eb7aa9 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5058,8 +5058,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_FILTER; - if (mvpp22_rss_is_supported()) + if (mvpp22_rss_is_supported()) { dev->hw_features |= NETIF_F_RXHASH; + dev->features |= NETIF_F_NTUPLE; + } if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 9d070cca3e9e..5adf307fbbfd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4805,7 +4805,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, */ iap = of_get_mac_address(hw->pdev->dev.of_node); if (!IS_ERR(iap)) - memcpy(dev->dev_addr, iap, ETH_ALEN); + ether_addr_copy(dev->dev_addr, iap); else memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index b44172a901ed..ba4fdf1b0dea 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -426,7 +426,7 @@ static void ks8851_init_mac(struct ks8851_net *ks) mac_addr = of_get_mac_address(ks->spidev->dev.of_node); if (!IS_ERR(mac_addr)) { - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + ether_addr_copy(dev->dev_addr, mac_addr); ks8851_write_mac_addr(dev); return; } diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index dc76b0d15234..e5c8412c08c1 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1328,7 +1328,7 @@ static int ks8851_probe(struct platform_device *pdev) if (pdev->dev.of_node) { mac = of_get_mac_address(pdev->dev.of_node); if (!IS_ERR(mac)) - memcpy(ks->mac_addr, mac, ETH_ALEN); + ether_addr_copy(ks->mac_addr, mac); } else { struct ks8851_mll_platform_data *pdata; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index da138edddd32..fec604c4c0d3 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1369,7 +1369,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) if (!is_valid_ether_addr(ndev->dev_addr)) { const char *macaddr = of_get_mac_address(np); if (!IS_ERR(macaddr)) - memcpy(ndev->dev_addr, macaddr, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, macaddr); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7c4e282242d5..6354f19a31eb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3193,7 +3193,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) - memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); + ether_addr_copy(pdata->mac_addr, mac_addr); pdata->no_ether_link = of_property_read_bool(np, 
"renesas,no-ether-link"); diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 70cce63a6081..696037d5ac3d 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -735,6 +735,7 @@ static int sgiseeq_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); sp = netdev_priv(dev); /* Make private data page aligned */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 195669f550f0..ba124a4da793 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1015,6 +1015,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) mac->mac = &sun8i_dwmac_ops; mac->dma = &sun8i_dwmac_dma_ops; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + /* The loopback bit seems to be re-set when link change * Simply mask it each time * Speed 10/100/1000 are set in BIT(2)/BIT(3) diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index c3f53a40b48f..ed12e1e5df2f 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -19,4 +19,4 @@ ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtoo obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o keystone_netcp-y := netcp_core.o cpsw_ale.o obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o -keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o +keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index b18eeb05b993..634fc484a0b3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2233,7 +2233,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, no_phy_slave: mac_addr = of_get_mac_address(slave_node); if (!IS_ERR(mac_addr)) { - memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + ether_addr_copy(slave_data->mac_addr, mac_addr); } else { ret = ti_cm_get_macid(&pdev->dev, i, slave_data->mac_addr); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 997475c209c0..47c45152132e 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -361,7 +361,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) static int temac_init_mac_address(struct net_device *ndev, const void *address) { - memcpy(ndev->dev_addr, address, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); temac_do_set_mac_address(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 691170753563..6886270da695 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1167,7 +1167,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (!IS_ERR(mac_address)) { /* Set the MAC address. 
*/ - memcpy(ndev->dev_addr, mac_address, ETH_ALEN); + ether_addr_copy(ndev->dev_addr, mac_address); } else { dev_warn(dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c index 6fa29ea8e2a3..6644762ff2ab 100644 --- a/drivers/net/phy/mdio-mux-meson-g12a.c +++ b/drivers/net/phy/mdio-mux-meson-g12a.c @@ -33,7 +33,7 @@ #define ETH_PLL_CTL7 0x60 #define ETH_PHY_CNTL0 0x80 -#define EPHY_G12A_ID 0x33000180 +#define EPHY_G12A_ID 0x33010180 #define ETH_PHY_CNTL1 0x84 #define PHY_CNTL1_ST_MODE GENMASK(2, 0) #define PHY_CNTL1_ST_PHYADD GENMASK(7, 3) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 761ce3b1e7bd..a669945eb829 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -217,12 +217,12 @@ static int rtl8211e_config_init(struct phy_device *phydev) if (oldpage < 0) goto err_restore_page; - ret = phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4); + ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4); if (ret) goto err_restore_page; - ret = phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY, - val); + ret = __phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY, + val); err_restore_page: return phy_restore_page(phydev, oldpage, ret); @@ -275,6 +275,8 @@ static struct phy_driver realtek_drvs[] = { .config_aneg = rtl8211_config_aneg, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc912), .name = "RTL8211B Gigabit Ethernet", @@ -284,12 +286,16 @@ static struct phy_driver realtek_drvs[] = { .write_mmd = &genphy_write_mmd_unsupported, .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc913), .name = "RTL8211C Gigabit Ethernet", .config_init = rtl8211c_config_init, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc914), .name = "RTL8211DN Gigabit Ethernet", @@ -297,6 +303,8 @@ static struct phy_driver realtek_drvs[] = { .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc915), .name = "RTL8211E Gigabit Ethernet", @@ -305,6 +313,8 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 04964937a3af..b7a49ae6b327 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -95,7 +95,7 @@ mt76_eeprom_override(struct mt76_dev *dev) mac = of_get_mac_address(np); if (!IS_ERR(mac)) - memcpy(dev->macaddr, mac, ETH_ALEN); + ether_addr_copy(dev->macaddr, mac); #endif if (!is_valid_ether_addr(dev->macaddr)) { diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index 9649cd53e955..6f1be80e8c4e 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c @@ -52,39 +52,25 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name) 
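The rtl8211e_config_init() fix above switches to the unlocked __phy_write()/__phy_modify() accessors because phy_select_page() already holds the MDIO bus lock. A minimal sketch of that paged-access pattern; the page, register and value numbers are illustrative only, and the function name is hypothetical:

#include <linux/phy.h>

/* phy_select_page() locks the MDIO bus and switches the page, so only the
 * unlocked __phy_*() accessors may be used until phy_restore_page() puts the
 * old page back and releases the lock (it also handles a failed select).
 */
static int example_write_paged_reg(struct phy_device *phydev)
{
        int oldpage, ret = 0;

        oldpage = phy_select_page(phydev, 0x7);
        if (oldpage < 0)
                goto err_restore_page;

        ret = __phy_write(phydev, 0x1c, 0x1234);
        if (ret)
                goto err_restore_page;

        ret = __phy_modify(phydev, 0x1d, 0x00f0, 0x0050);

err_restore_page:
        return phy_restore_page(phydev, oldpage, ret);
}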
static const void *of_get_mac_addr_nvmem(struct device_node *np) { int ret; - u8 mac[ETH_ALEN]; - struct property *pp; + const void *mac; + u8 nvmem_mac[ETH_ALEN]; struct platform_device *pdev = of_find_device_by_node(np); if (!pdev) return ERR_PTR(-ENODEV); - ret = nvmem_get_mac_address(&pdev->dev, &mac); - if (ret) + ret = nvmem_get_mac_address(&pdev->dev, &nvmem_mac); + if (ret) { + put_device(&pdev->dev); return ERR_PTR(ret); - - pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL); - if (!pp) - return ERR_PTR(-ENOMEM); - - pp->name = "nvmem-mac-address"; - pp->length = ETH_ALEN; - pp->value = devm_kmemdup(&pdev->dev, mac, ETH_ALEN, GFP_KERNEL); - if (!pp->value) { - ret = -ENOMEM; - goto free; } - ret = of_add_property(np, pp); - if (ret) - goto free; + mac = devm_kmemdup(&pdev->dev, nvmem_mac, ETH_ALEN, GFP_KERNEL); + put_device(&pdev->dev); + if (!mac) + return ERR_PTR(-ENOMEM); - return pp->value; -free: - devm_kfree(&pdev->dev, pp->value); - devm_kfree(&pdev->dev, pp); - - return ERR_PTR(ret); + return mac; } /** diff --git a/include/net/dsa.h b/include/net/dsa.h index 6aaaadd6a413..685294817712 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -99,24 +99,9 @@ struct __dsa_skb_cb { #define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb)) -#define DSA_SKB_CB_COPY(nskb, skb) \ - { *__DSA_SKB_CB(nskb) = *__DSA_SKB_CB(skb); } - -#define DSA_SKB_CB_ZERO(skb) \ - { *__DSA_SKB_CB(skb) = (struct __dsa_skb_cb) {0}; } - #define DSA_SKB_CB_PRIV(skb) \ ((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv)) -#define DSA_SKB_CB_CLONE(_clone, _skb) \ - { \ - struct sk_buff *clone = _clone; \ - struct sk_buff *skb = _skb; \ - \ - DSA_SKB_CB_COPY(clone, skb); \ - DSA_SKB_CB(skb)->clone = clone; \ - } - struct dsa_switch_tree { struct list_head list; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 72336bac7573..63e0cf66f01a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -629,7 +629,7 @@ union bpf_attr { * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -654,7 +654,7 @@ union bpf_attr { * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -686,7 +686,7 @@ union bpf_attr { * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -741,7 +741,7 @@ union bpf_attr { * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. 
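Besides no longer registering a synthetic "nvmem-mac-address" property, the of_net.c rewrite above also plugs a device reference leak: of_find_device_by_node() takes a reference on the platform device, and every exit path now drops it. A short sketch of that rule in isolation, with a hypothetical function name:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Look up the platform device behind a DT node and drop the reference taken
 * by of_find_device_by_node() on every path once the device is not needed.
 */
static int example_peek_node_device(struct device_node *np)
{
        struct platform_device *pdev = of_find_device_by_node(np);

        if (!pdev)
                return -ENODEV;

        dev_info(&pdev->dev, "found device for %pOF\n", np);
        put_device(&pdev->dev);

        return 0;
}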
* - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -806,7 +806,7 @@ union bpf_attr { * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -818,7 +818,7 @@ union bpf_attr { * Description * Pop a VLAN header from the packet associated to *skb*. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1168,7 +1168,7 @@ union bpf_attr { * All values for *flags* are reserved for future usage, and must * be left at zero. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1281,7 +1281,7 @@ union bpf_attr { * implicitly linearizes, unclones and drops offloads from the * *skb*. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1317,7 +1317,7 @@ union bpf_attr { * **bpf_skb_pull_data()** to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1369,7 +1369,7 @@ union bpf_attr { * All values for *flags* are reserved for future usage, and must * be left at zero. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1384,7 +1384,7 @@ union bpf_attr { * can be used to prepare the packet for pushing or popping * headers. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1518,20 +1518,20 @@ union bpf_attr { * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. 
* Adjusting mss in this way is not allowed for datagrams. * - * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **: - * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **: + * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: * Any new space is reserved to hold a tunnel header. * Configure skb offsets and other fields accordingly. * - * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **: - * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: + * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, + * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: * Use with ENCAP_L3 flags to further specify the tunnel type. * - * * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: + * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): * Use with ENCAP_L3/L4 flags to further specify the tunnel - * type; **len** is the length of the inner MAC header. + * type; *len* is the length of the inner MAC header. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1610,7 +1610,7 @@ union bpf_attr { * more flexibility as the user is free to store whatever meta * data they need. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1852,7 +1852,7 @@ union bpf_attr { * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1886,7 +1886,7 @@ union bpf_attr { * only possible to shrink the packet as of this writing, * therefore *delta* must be a negative integer. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2061,18 +2061,18 @@ union bpf_attr { * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). The outer header * must be IPv4 or IPv6, followed by zero or more - * additional headers, up to LWT_BPF_MAX_HEADROOM total - * bytes in all prepended headers. Please note that - * if skb_is_gso(skb) is true, no more than two headers - * can be prepended, and the inner header, if present, - * should be either GRE or UDP/GUE. + * additional headers, up to **LWT_BPF_MAX_HEADROOM** + * total bytes in all prepended headers. Please note that + * if **skb_is_gso**\ (*skb*) is true, no more than two + * headers can be prepended, and the inner header, if + * present, should be either GRE or UDP/GUE. * - * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of - * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called - * by bpf programs of types BPF_PROG_TYPE_LWT_IN and - * BPF_PROG_TYPE_LWT_XMIT. 
+ * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs + * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can + * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and + * **BPF_PROG_TYPE_LWT_XMIT**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2087,7 +2087,7 @@ union bpf_attr { * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2103,7 +2103,7 @@ union bpf_attr { * after the segments are accepted. *delta* can be as well * positive (growing) as negative (shrinking). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2126,13 +2126,13 @@ union bpf_attr { * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. - * Type of param: **struct ipv6_sr_hdr**. + * Type of *param*: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. - * Type of param: **struct ipv6_sr_hdr**. + * Type of *param*: **struct ipv6_sr_hdr**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2285,7 +2285,8 @@ union bpf_attr { * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description @@ -2321,7 +2322,8 @@ union bpf_attr { * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * int bpf_sk_release(struct bpf_sock *sock) * Description @@ -2490,31 +2492,34 @@ union bpf_attr { * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * - * This function is identical to bpf_sk_lookup_tcp, except that it - * also returns timewait or request sockets. Use bpf_sk_fullsock - * or bpf_tcp_socket to access the full structure. + * This function is identical to **bpf_sk_lookup_tcp**\ (), except + * that it also returns timewait or request sockets. 
Use + * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the + * full structure. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description - * Check whether iph and th contain a valid SYN cookie ACK for - * the listening socket in sk. + * Check whether *iph* and *th* contain a valid SYN cookie ACK for + * the listening socket in *sk*. * - * iph points to the start of the IPv4 or IPv6 header, while - * iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr). + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). * - * th points to the start of the TCP header, while th_len contains - * sizeof(struct tcphdr). + * *th* points to the start of the TCP header, while *th_len* + * contains **sizeof**\ (**struct tcphdr**). * * Return - * 0 if iph and th are a valid SYN cookie ACK, or a negative error - * otherwise. + * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative + * error otherwise. * * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description @@ -2592,17 +2597,17 @@ union bpf_attr { * and save the result in *res*. * * The string may begin with an arbitrary amount of white space - * (as determined by isspace(3)) followed by a single optional '-' - * sign. + * (as determined by **isspace**\ (3)) followed by a single + * optional '**-**' sign. * * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically - * similar to user space strtol(3). + * similar to user space **strtol**\ (3). * Return * Number of characters consumed on success. Must be positive but - * no more than buf_len. + * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. @@ -2616,16 +2621,16 @@ union bpf_attr { * given base and save the result in *res*. * * The string may begin with an arbitrary amount of white space - * (as determined by isspace(3)). + * (as determined by **isspace**\ (3)). * * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically - * similar to user space strtoul(3). + * similar to user space **strtoul**\ (3). * Return * Number of characters consumed on success. Must be positive but - * no more than buf_len. + * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. @@ -2634,26 +2639,26 @@ union bpf_attr { * * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) * Description - * Get a bpf-local-storage from a sk. + * Get a bpf-local-storage from a *sk*. * * Logically, it could be thought of getting the value from * a *map* with *sk* as the **key**. From this * perspective, the usage is not much different from - * **bpf_map_lookup_elem(map, &sk)** except this - * helper enforces the key must be a **bpf_fullsock()** - * and the map must be a BPF_MAP_TYPE_SK_STORAGE also. 
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this + * helper enforces the key must be a full socket and the map must + * be a **BPF_MAP_TYPE_SK_STORAGE** also. * * Underneath, the value is stored locally at *sk* instead of - * the map. The *map* is used as the bpf-local-storage **type**. - * The bpf-local-storage **type** (i.e. the *map*) is searched - * against all bpf-local-storages residing at sk. + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf-local-storages residing at *sk*. * - * An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be + * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used - * together with BPF_SK_STORAGE_GET_F_CREATE to specify + * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf-local-storage. If *value* is - * NULL, the new bpf-local-storage will be zero initialized. + * **NULL**, the new bpf-local-storage will be zero initialized. * Return * A bpf-local-storage pointer is returned on success. * @@ -2662,7 +2667,7 @@ union bpf_attr { * * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description - * Delete a bpf-local-storage from a sk. + * Delete a bpf-local-storage from a *sk*. * Return * 0 on success. * diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f0cf7b0f4f35..505393c6e959 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -966,7 +966,6 @@ enum nft_socket_keys { * @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address) * @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address) * @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address) - * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack * @NFT_CT_ID: conntrack id */ enum nft_ct_keys { @@ -993,7 +992,6 @@ enum nft_ct_keys { NFT_CT_DST_IP, NFT_CT_SRC_IP6, NFT_CT_DST_IP6, - NFT_CT_TIMEOUT, NFT_CT_ID, __NFT_CT_MAX }; @@ -1138,7 +1136,7 @@ enum nft_log_level { NFT_LOGLEVEL_AUDIT, __NFT_LOGLEVEL_MAX }; -#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX + 1) +#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX - 1) /** * enum nft_queue_attributes - nf_tables queue expression netlink attributes diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 3ba56e73c90e..242a643af82f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -338,7 +338,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) } static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, - s32 end_new, u32 curr, const bool probe_pass) + s32 end_new, s32 curr, const bool probe_pass) { const s64 imm_min = S32_MIN, imm_max = S32_MAX; s32 delta = end_new - end_old; @@ -356,7 +356,7 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, } static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, - s32 end_new, u32 curr, const bool probe_pass) + s32 end_new, s32 curr, const bool probe_pass) { const s32 off_min = S16_MIN, off_max = S16_MAX; s32 delta = end_new - end_old; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7b05e8938d5c..95f9354495ad 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7599,7 +7599,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->dst_reg, shift); insn_buf[cnt++] = 
BPF_ALU64_IMM(BPF_AND, insn->dst_reg, - (1 << size * 8) - 1); + (1ULL << size * 8) - 1); } } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 4a9aaa3fac8f..6d4a24a7534b 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -602,13 +602,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, call_netdevice_notifiers(NETDEV_JOIN, dev); err = dev_set_allmulti(dev, 1); - if (err) - goto put_back; + if (err) { + kfree(p); /* kobject not yet init'd, manually free */ + goto err1; + } err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), SYSFS_BRIDGE_PORT_ATTR); if (err) - goto err1; + goto err2; err = br_sysfs_addif(p); if (err) @@ -700,12 +702,9 @@ err3: sysfs_remove_link(br->ifobj, p->dev->name); err2: kobject_put(&p->kobj); - p = NULL; /* kobject_put frees */ -err1: dev_set_allmulti(dev, -1); -put_back: +err1: dev_put(dev); - kfree(p); return err; } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 4e0091311d40..6b07e4978eb3 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2153,7 +2153,9 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user, if (ret < 0) return ret; - WARN_ON(size_remaining); + if (size_remaining) + return -EINVAL; + return state->buf_kern_offset; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9ca784c592ac..548f39dde307 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -734,7 +734,9 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, flow_keys->nhoff = nhoff; flow_keys->thoff = flow_keys->nhoff; + preempt_disable(); result = BPF_PROG_RUN(prog, ctx); + preempt_enable(); flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen); flow_keys->thoff = clamp_t(u16, flow_keys->thoff, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 0e2f71ab8367..5dd85ec51bfe 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -263,7 +263,6 @@ int dccp_disconnect(struct sock *sk, int flags) struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); struct dccp_sock *dp = dccp_sk(sk); - int err = 0; const int old_state = sk->sk_state; if (old_state != DCCP_CLOSED) @@ -307,7 +306,7 @@ int dccp_disconnect(struct sock *sk, int flags) WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); sk->sk_error_report(sk); - return err; + return 0; } EXPORT_SYMBOL_GPL(dccp_disconnect); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index fe7b6a62e8f1..9892ca1f6859 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -463,6 +463,8 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) s->tx_bytes += skb->len; u64_stats_update_end(&s->syncp); + DSA_SKB_CB(skb)->deferred_xmit = false; + /* Identify PTP protocol packets, clone them, and pass them to the * switch driver */ diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index d52db5f2c721..9c3114179690 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -206,10 +206,10 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = { .rcv = brcm_tag_rcv_prepend, .overhead = BRCM_TAG_LEN, }; -#endif DSA_TAG_DRIVER(brcm_prepend_netdev_ops); MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND); +#endif static struct dsa_tag_driver *dsa_tag_driver_array[] = { #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 1601275efe2d..4c2ef42e189c 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ 
b/net/netfilter/nf_conntrack_h323_asn1.c @@ -172,7 +172,7 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits) if (bits % BITS_PER_BYTE > 0) bytes++; - if (*bs->cur + bytes > *bs->end) + if (bs->cur + bytes > bs->end) return 1; return 0; diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 005589c6d0f6..12de40390e97 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -748,24 +748,19 @@ static int callforward_do_filter(struct net *net, } break; } -#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6) +#if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { - const struct nf_ipv6_ops *v6ops; struct rt6_info *rt1, *rt2; struct flowi6 fl1, fl2; - v6ops = nf_get_ipv6_ops(); - if (!v6ops) - return 0; - memset(&fl1, 0, sizeof(fl1)); fl1.daddr = src->in6; memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->in6; - if (!v6ops->route(net, (struct dst_entry **)&rt1, + if (!nf_ip6_route(net, (struct dst_entry **)&rt1, flowi6_to_flowi(&fl1), false)) { - if (!v6ops->route(net, (struct dst_entry **)&rt2, + if (!nf_ip6_route(net, (struct dst_entry **)&rt2, flowi6_to_flowi(&fl2), false)) { if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr), rt6_nexthop(rt2, &fl2.daddr)) && diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 8dcc064d518d..7db79c1b8084 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1256,7 +1256,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl, struct nf_conntrack_tuple tuple; struct nf_conn *ct; struct nfgenmsg *nfmsg = nlmsg_data(nlh); - u_int8_t u3 = nfmsg->nfgen_family; + u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC; struct nf_conntrack_zone zone; int err; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 7aabfd4b1e50..4469519a4879 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -185,14 +185,25 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = { int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) { - flow->timeout = (u32)jiffies; + int err; - rhashtable_insert_fast(&flow_table->rhashtable, - &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, - nf_flow_offload_rhash_params); - rhashtable_insert_fast(&flow_table->rhashtable, - &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, - nf_flow_offload_rhash_params); + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, + nf_flow_offload_rhash_params); + if (err < 0) + return err; + + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[1].node, + nf_flow_offload_rhash_params); + if (err < 0) { + rhashtable_remove_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, + nf_flow_offload_rhash_params); + return err; + } + + flow->timeout = (u32)jiffies; return 0; } EXPORT_SYMBOL_GPL(flow_offload_add); @@ -232,6 +243,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table, { struct flow_offload_tuple_rhash *tuplehash; struct flow_offload *flow; + struct flow_offload_entry *e; int dir; tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple, @@ -244,6 +256,10 @@ flow_offload_lookup(struct nf_flowtable *flow_table, if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)) return NULL; + e = container_of(flow, struct flow_offload_entry, flow); + if (unlikely(nf_ct_is_dying(e->ct))) + return NULL; + return tuplehash; } 
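flow_offload_add() above now checks the return value of both rhashtable insertions and removes the first entry again if the second insertion fails, instead of ignoring the errors. A generic sketch of that insert-with-rollback pattern, using a hypothetical object hashed under two keys:

#include <linux/rhashtable.h>

struct example_entry {
        struct rhash_head node[2];
        u32 key[2];
};

/* Insert one object under two keys; if the second insertion fails, undo the
 * first so no half-inserted entry is left behind in the table.
 */
static int example_insert_both(struct rhashtable *ht, struct example_entry *e,
                               const struct rhashtable_params params)
{
        int err;

        err = rhashtable_insert_fast(ht, &e->node[0], params);
        if (err < 0)
                return err;

        err = rhashtable_insert_fast(ht, &e->node[1], params);
        if (err < 0)
                rhashtable_remove_fast(ht, &e->node[0], params);

        return err;
}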
EXPORT_SYMBOL_GPL(flow_offload_lookup); @@ -290,8 +306,10 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow) static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) { struct nf_flowtable *flow_table = data; + struct flow_offload_entry *e; - if (nf_flow_has_expired(flow) || + e = container_of(flow, struct flow_offload_entry, flow); + if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) || (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) flow_offload_del(flow_table, flow); } diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 6452550d187f..0d603e20b519 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -181,6 +181,9 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, iph->protocol != IPPROTO_UDP) return -1; + if (iph->ttl <= 1) + return -1; + thoff = iph->ihl * 4; if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; @@ -408,6 +411,9 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, ip6h->nexthdr != IPPROTO_UDP) return -1; + if (ip6h->hop_limit <= 1) + return -1; + thoff = sizeof(*ip6h); if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d98416e83d4e..28241e82fd15 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -213,33 +213,33 @@ static int nft_deltable(struct nft_ctx *ctx) return err; } -static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) +static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); if (trans == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); if (msg_type == NFT_MSG_NEWCHAIN) nft_activate_next(ctx->net, ctx->chain); list_add_tail(&trans->list, &ctx->net->nft.commit_list); - return 0; + return trans; } static int nft_delchain(struct nft_ctx *ctx) { - int err; + struct nft_trans *trans; - err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN); - if (err < 0) - return err; + trans = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN); + if (IS_ERR(trans)) + return PTR_ERR(trans); ctx->table->use--; nft_deactivate_next(ctx->net, ctx->chain); - return err; + return 0; } static void nft_rule_expr_activate(const struct nft_ctx *ctx, @@ -1189,6 +1189,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats) u64 pkts, bytes; int cpu; + if (!stats) + return 0; + memset(&total, 0, sizeof(total)); for_each_possible_cpu(cpu) { cpu_stats = per_cpu_ptr(stats, cpu); @@ -1246,6 +1249,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); const struct nf_hook_ops *ops = &basechain->ops; + struct nft_stats __percpu *stats; struct nlattr *nest; nest = nla_nest_start_noflag(skb, NFTA_CHAIN_HOOK); @@ -1267,8 +1271,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) goto nla_put_failure; - if (rcu_access_pointer(basechain->stats) && - nft_dump_stats(skb, rcu_dereference(basechain->stats))) + stats = rcu_dereference_check(basechain->stats, + lockdep_commit_lock_is_held(net)); + if (nft_dump_stats(skb, stats)) goto nla_put_failure; } @@ -1615,6 +1620,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 
genmask, struct nft_base_chain *basechain; struct nft_stats __percpu *stats; struct net *net = ctx->net; + struct nft_trans *trans; struct nft_chain *chain; struct nft_rule **rules; int err; @@ -1662,7 +1668,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, ops->dev = hook.dev; chain->flags |= NFT_BASE_CHAIN; - basechain->policy = policy; + basechain->policy = NF_ACCEPT; } else { chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) @@ -1698,13 +1704,18 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err) goto err2; - err = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); - if (err < 0) { + trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); rhltable_remove(&table->chains_ht, &chain->rhlhead, nft_chain_ht_params); goto err2; } + nft_trans_chain_policy(trans) = -1; + if (nft_is_base_chain(chain)) + nft_trans_chain_policy(trans) = policy; + table->use++; list_add_tail_rcu(&chain->list, &table->chains); @@ -6310,6 +6321,27 @@ static int nf_tables_validate(struct net *net) return 0; } +/* a drop policy has to be deferred until all rules have been activated, + * otherwise a large ruleset that contains a drop-policy base chain will + * cause all packets to get dropped until the full transaction has been + * processed. + * + * We defer the drop policy until the transaction has been finalized. + */ +static void nft_chain_commit_drop_policy(struct nft_trans *trans) +{ + struct nft_base_chain *basechain; + + if (nft_trans_chain_policy(trans) != NF_DROP) + return; + + if (!nft_is_base_chain(trans->ctx.chain)) + return; + + basechain = nft_base_chain(trans->ctx.chain); + basechain->policy = NF_DROP; +} + static void nft_chain_commit_update(struct nft_trans *trans) { struct nft_base_chain *basechain; @@ -6631,6 +6663,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); /* trans destroyed after rcu grace period */ } else { + nft_chain_commit_drop_policy(trans); nft_clear(net, trans->ctx.chain); nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); nft_trans_destroy(trans); diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 6e6b9adf7d38..69d7a8439c7a 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -94,8 +94,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (help) goto out; - if (ctinfo == IP_CT_NEW || - ctinfo == IP_CT_RELATED) + if (!nf_ct_is_confirmed(ct)) goto out; if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status)) @@ -113,6 +112,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (ret < 0) goto err_flow_add; + dst_release(route.tuple[!dir].dst); return; err_flow_add: diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index dd0e97f4f6c0..801872a2e7aa 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -728,12 +728,13 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int, struct sockaddr_qrtr *, struct sockaddr_qrtr *); + __le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA); struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sock *sk = sock->sk; struct qrtr_node *node; struct sk_buff *skb; + u32 type = 0; size_t plen; - u32 type = QRTR_TYPE_DATA; int rc; if (msg->msg_flags & ~(MSG_DONTWAIT)) @@ -807,8 +808,8 @@ static int qrtr_sendmsg(struct socket *sock, struct 
msghdr *msg, size_t len) } /* control messages already require the type as 'command' */ - skb_copy_bits(skb, 0, &type, 4); - type = le32_to_cpu(type); + skb_copy_bits(skb, 0, &qrtr_type, 4); + type = le32_to_cpu(qrtr_type); } rc = enqueue_fn(node, skb, type, &ipc->us, addr); diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 5010a4d5bfba..894cc58c1a03 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # SPDX-License-Identifier: GPL-2.0-only # -# Copyright (C) 2018 Netronome Systems, Inc. +# Copyright (C) 2018-2019 Netronome Systems, Inc. # In case user attempts to run with Python 2. from __future__ import print_function @@ -39,7 +39,7 @@ class Helper(object): Break down helper function protocol into smaller chunks: return type, name, distincts arguments. """ - arg_re = re.compile('((const )?(struct )?(\w+|...))( (\**)(\w+))?$') + arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$') res = {} proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$') @@ -54,8 +54,8 @@ class Helper(object): capture = arg_re.match(a) res['args'].append({ 'type' : capture.group(1), - 'star' : capture.group(6), - 'name' : capture.group(7) + 'star' : capture.group(5), + 'name' : capture.group(6) }) return res diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d82b87c16b0a..c61787b15f27 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4649,7 +4649,7 @@ static int selinux_socket_connect_helper(struct socket *sock, struct lsm_network_audit net = {0,}; struct sockaddr_in *addr4 = NULL; struct sockaddr_in6 *addr6 = NULL; - unsigned short snum = 0; + unsigned short snum; u32 sid, perm; /* sctp_connectx(3) calls via selinux_sctp_bind_connect() @@ -4674,12 +4674,12 @@ static int selinux_socket_connect_helper(struct socket *sock, break; default: /* Note that SCTP services expect -EINVAL, whereas - * others must handle this at the protocol level: - * connect(AF_UNSPEC) on a connected socket is - * a documented way disconnect the socket. + * others expect -EAFNOSUPPORT. */ if (sksec->sclass == SECCLASS_SCTP_SOCKET) return -EINVAL; + else + return -EAFNOSUPPORT; } err = sel_netport_sid(sk->sk_protocol, snum, &sid); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 72336bac7573..63e0cf66f01a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -629,7 +629,7 @@ union bpf_attr { * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -654,7 +654,7 @@ union bpf_attr { * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -686,7 +686,7 @@ union bpf_attr { * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. 
* - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -741,7 +741,7 @@ union bpf_attr { * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -806,7 +806,7 @@ union bpf_attr { * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -818,7 +818,7 @@ union bpf_attr { * Description * Pop a VLAN header from the packet associated to *skb*. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1168,7 +1168,7 @@ union bpf_attr { * All values for *flags* are reserved for future usage, and must * be left at zero. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1281,7 +1281,7 @@ union bpf_attr { * implicitly linearizes, unclones and drops offloads from the * *skb*. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1317,7 +1317,7 @@ union bpf_attr { * **bpf_skb_pull_data()** to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1369,7 +1369,7 @@ union bpf_attr { * All values for *flags* are reserved for future usage, and must * be left at zero. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. 
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1384,7 +1384,7 @@ union bpf_attr { * can be used to prepare the packet for pushing or popping * headers. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1518,20 +1518,20 @@ union bpf_attr { * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * Adjusting mss in this way is not allowed for datagrams. * - * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **: - * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **: + * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: * Any new space is reserved to hold a tunnel header. * Configure skb offsets and other fields accordingly. * - * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **: - * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: + * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, + * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: * Use with ENCAP_L3 flags to further specify the tunnel type. * - * * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: + * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): * Use with ENCAP_L3/L4 flags to further specify the tunnel - * type; **len** is the length of the inner MAC header. + * type; *len* is the length of the inner MAC header. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1610,7 +1610,7 @@ union bpf_attr { * more flexibility as the user is free to store whatever meta * data they need. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1852,7 +1852,7 @@ union bpf_attr { * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1886,7 +1886,7 @@ union bpf_attr { * only possible to shrink the packet as of this writing, * therefore *delta* must be a negative integer. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2061,18 +2061,18 @@ union bpf_attr { * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). The outer header * must be IPv4 or IPv6, followed by zero or more - * additional headers, up to LWT_BPF_MAX_HEADROOM total - * bytes in all prepended headers. 
Please note that - * if skb_is_gso(skb) is true, no more than two headers - * can be prepended, and the inner header, if present, - * should be either GRE or UDP/GUE. + * additional headers, up to **LWT_BPF_MAX_HEADROOM** + * total bytes in all prepended headers. Please note that + * if **skb_is_gso**\ (*skb*) is true, no more than two + * headers can be prepended, and the inner header, if + * present, should be either GRE or UDP/GUE. * - * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of - * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called - * by bpf programs of types BPF_PROG_TYPE_LWT_IN and - * BPF_PROG_TYPE_LWT_XMIT. + * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs + * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can + * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and + * **BPF_PROG_TYPE_LWT_XMIT**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2087,7 +2087,7 @@ union bpf_attr { * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2103,7 +2103,7 @@ union bpf_attr { * after the segments are accepted. *delta* can be as well * positive (growing) as negative (shrinking). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2126,13 +2126,13 @@ union bpf_attr { * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. - * Type of param: **struct ipv6_sr_hdr**. + * Type of *param*: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. - * Type of param: **struct ipv6_sr_hdr**. + * Type of *param*: **struct ipv6_sr_hdr**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2285,7 +2285,8 @@ union bpf_attr { * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description @@ -2321,7 +2322,8 @@ union bpf_attr { * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 
* For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * int bpf_sk_release(struct bpf_sock *sock) * Description @@ -2490,31 +2492,34 @@ union bpf_attr { * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * - * This function is identical to bpf_sk_lookup_tcp, except that it - * also returns timewait or request sockets. Use bpf_sk_fullsock - * or bpf_tcp_socket to access the full structure. + * This function is identical to **bpf_sk_lookup_tcp**\ (), except + * that it also returns timewait or request sockets. Use + * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the + * full structure. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description - * Check whether iph and th contain a valid SYN cookie ACK for - * the listening socket in sk. + * Check whether *iph* and *th* contain a valid SYN cookie ACK for + * the listening socket in *sk*. * - * iph points to the start of the IPv4 or IPv6 header, while - * iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr). + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). * - * th points to the start of the TCP header, while th_len contains - * sizeof(struct tcphdr). + * *th* points to the start of the TCP header, while *th_len* + * contains **sizeof**\ (**struct tcphdr**). * * Return - * 0 if iph and th are a valid SYN cookie ACK, or a negative error - * otherwise. + * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative + * error otherwise. * * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description @@ -2592,17 +2597,17 @@ union bpf_attr { * and save the result in *res*. * * The string may begin with an arbitrary amount of white space - * (as determined by isspace(3)) followed by a single optional '-' - * sign. + * (as determined by **isspace**\ (3)) followed by a single + * optional '**-**' sign. * * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically - * similar to user space strtol(3). + * similar to user space **strtol**\ (3). * Return * Number of characters consumed on success. Must be positive but - * no more than buf_len. + * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. @@ -2616,16 +2621,16 @@ union bpf_attr { * given base and save the result in *res*. * * The string may begin with an arbitrary amount of white space - * (as determined by isspace(3)). + * (as determined by **isspace**\ (3)). * * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically - * similar to user space strtoul(3). + * similar to user space **strtoul**\ (3). 
* Return * Number of characters consumed on success. Must be positive but - * no more than buf_len. + * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. @@ -2634,26 +2639,26 @@ union bpf_attr { * * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) * Description - * Get a bpf-local-storage from a sk. + * Get a bpf-local-storage from a *sk*. * * Logically, it could be thought of getting the value from * a *map* with *sk* as the **key**. From this * perspective, the usage is not much different from - * **bpf_map_lookup_elem(map, &sk)** except this - * helper enforces the key must be a **bpf_fullsock()** - * and the map must be a BPF_MAP_TYPE_SK_STORAGE also. + * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this + * helper enforces the key must be a full socket and the map must + * be a **BPF_MAP_TYPE_SK_STORAGE** also. * * Underneath, the value is stored locally at *sk* instead of - * the map. The *map* is used as the bpf-local-storage **type**. - * The bpf-local-storage **type** (i.e. the *map*) is searched - * against all bpf-local-storages residing at sk. + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf-local-storages residing at *sk*. * - * An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be + * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used - * together with BPF_SK_STORAGE_GET_F_CREATE to specify + * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf-local-storage. If *value* is - * NULL, the new bpf-local-storage will be zero initialized. + * **NULL**, the new bpf-local-storage will be zero initialized. * Return * A bpf-local-storage pointer is returned on success. * @@ -2662,7 +2667,7 @@ union bpf_attr { * * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description - * Delete a bpf-local-storage from a sk. + * Delete a bpf-local-storage from a *sk*. * Return * 0 on success. * diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 11a65db4b93f..7e3b79d7c25f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -44,6 +44,7 @@ #include "btf.h" #include "str_error.h" #include "libbpf_util.h" +#include "libbpf_internal.h" #ifndef EM_BPF #define EM_BPF 247 @@ -128,6 +129,10 @@ struct bpf_capabilities { __u32 name:1; /* v5.2: kernel support for global data sections. 
*/ __u32 global_data:1; + /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */ + __u32 btf_func:1; + /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ + __u32 btf_datasec:1; }; /* @@ -1021,6 +1026,74 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx) return false; } +static void bpf_object__sanitize_btf(struct bpf_object *obj) +{ + bool has_datasec = obj->caps.btf_datasec; + bool has_func = obj->caps.btf_func; + struct btf *btf = obj->btf; + struct btf_type *t; + int i, j, vlen; + __u16 kind; + + if (!obj->btf || (has_func && has_datasec)) + return; + + for (i = 1; i <= btf__get_nr_types(btf); i++) { + t = (struct btf_type *)btf__type_by_id(btf, i); + kind = BTF_INFO_KIND(t->info); + + if (!has_datasec && kind == BTF_KIND_VAR) { + /* replace VAR with INT */ + t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); + t->size = sizeof(int); + *(int *)(t+1) = BTF_INT_ENC(0, 0, 32); + } else if (!has_datasec && kind == BTF_KIND_DATASEC) { + /* replace DATASEC with STRUCT */ + struct btf_var_secinfo *v = (void *)(t + 1); + struct btf_member *m = (void *)(t + 1); + struct btf_type *vt; + char *name; + + name = (char *)btf__name_by_offset(btf, t->name_off); + while (*name) { + if (*name == '.') + *name = '_'; + name++; + } + + vlen = BTF_INFO_VLEN(t->info); + t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); + for (j = 0; j < vlen; j++, v++, m++) { + /* order of field assignments is important */ + m->offset = v->offset * 8; + m->type = v->type; + /* preserve variable name as member name */ + vt = (void *)btf__type_by_id(btf, v->type); + m->name_off = vt->name_off; + } + } else if (!has_func && kind == BTF_KIND_FUNC_PROTO) { + /* replace FUNC_PROTO with ENUM */ + vlen = BTF_INFO_VLEN(t->info); + t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); + t->size = sizeof(__u32); /* kernel enforced */ + } else if (!has_func && kind == BTF_KIND_FUNC) { + /* replace FUNC with TYPEDEF */ + t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); + } + } +} + +static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) +{ + if (!obj->btf_ext) + return; + + if (!obj->caps.btf_func) { + btf_ext__free(obj->btf_ext); + obj->btf_ext = NULL; + } +} + static int bpf_object__elf_collect(struct bpf_object *obj, int flags) { Elf *elf = obj->efile.elf; @@ -1164,8 +1237,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) obj->btf = NULL; } else { err = btf__finalize_data(obj, obj->btf); - if (!err) + if (!err) { + bpf_object__sanitize_btf(obj); err = btf__load(obj->btf); + } if (err) { pr_warning("Error finalizing and loading %s into kernel: %d. 
Ignored and continue.\n", BTF_ELF_SEC, err); @@ -1187,6 +1262,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); obj->btf_ext = NULL; + } else { + bpf_object__sanitize_btf_ext(obj); } } } @@ -1556,12 +1633,63 @@ bpf_object__probe_global_data(struct bpf_object *obj) return 0; } +static int bpf_object__probe_btf_func(struct bpf_object *obj) +{ + const char strs[] = "\0int\0x\0a"; + /* void x(int a) {} */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* FUNC_PROTO */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), + BTF_PARAM_ENC(7, 1), + /* FUNC x */ /* [3] */ + BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), + }; + int res; + + res = libbpf__probe_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs)); + if (res < 0) + return res; + if (res > 0) + obj->caps.btf_func = 1; + return 0; +} + +static int bpf_object__probe_btf_datasec(struct bpf_object *obj) +{ + const char strs[] = "\0x\0.data"; + /* static int a; */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* VAR x */ /* [2] */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), + BTF_VAR_STATIC, + /* DATASEC val */ /* [3] */ + BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + }; + int res; + + res = libbpf__probe_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs)); + if (res < 0) + return res; + if (res > 0) + obj->caps.btf_datasec = 1; + return 0; +} + static int bpf_object__probe_caps(struct bpf_object *obj) { int (*probe_fn[])(struct bpf_object *obj) = { bpf_object__probe_name, bpf_object__probe_global_data, + bpf_object__probe_btf_func, + bpf_object__probe_btf_datasec, }; int i, ret; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h new file mode 100644 index 000000000000..789e435b5900 --- /dev/null +++ b/tools/lib/bpf/libbpf_internal.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Internal libbpf helpers. 
+ * + * Copyright (c) 2019 Facebook + */ + +#ifndef __LIBBPF_LIBBPF_INTERNAL_H +#define __LIBBPF_LIBBPF_INTERNAL_H + +#define BTF_INFO_ENC(kind, kind_flag, vlen) \ + ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) +#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type) +#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \ + ((encoding) << 24 | (bits_offset) << 16 | (nr_bits)) +#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \ + BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ + BTF_INT_ENC(encoding, bits_offset, bits) +#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset) +#define BTF_PARAM_ENC(name, type) (name), (type) +#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) + +int libbpf__probe_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len); + +#endif /* __LIBBPF_LIBBPF_INTERNAL_H */ diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index a2c64a9ce1a6..5e2aa83f637a 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -15,6 +15,7 @@ #include "bpf.h" #include "libbpf.h" +#include "libbpf_internal.h" static bool grep(const char *buffer, const char *pattern) { @@ -132,21 +133,43 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) return errno != EINVAL && errno != EOPNOTSUPP; } -static int load_btf(void) +int libbpf__probe_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len) { -#define BTF_INFO_ENC(kind, kind_flag, vlen) \ - ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) -#define BTF_TYPE_ENC(name, info, size_or_type) \ - (name), (info), (size_or_type) -#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \ - ((encoding) << 24 | (bits_offset) << 16 | (nr_bits)) -#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \ - BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ - BTF_INT_ENC(encoding, bits_offset, bits) -#define BTF_MEMBER_ENC(name, type, bits_offset) \ - (name), (type), (bits_offset) + struct btf_header hdr = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), + .type_len = types_len, + .str_off = types_len, + .str_len = str_len, + }; + int btf_fd, btf_len; + __u8 *raw_btf; - const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l"; + btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len; + raw_btf = malloc(btf_len); + if (!raw_btf) + return -ENOMEM; + + memcpy(raw_btf, &hdr, sizeof(hdr)); + memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len); + memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len); + + btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false); + if (btf_fd < 0) { + free(raw_btf); + return 0; + } + + close(btf_fd); + free(raw_btf); + return 1; +} + +static int load_sk_storage_btf(void) +{ + const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l"; /* struct bpf_spin_lock { * int val; * }; @@ -155,7 +178,7 @@ static int load_btf(void) * struct bpf_spin_lock l; * }; */ - __u32 btf_raw_types[] = { + __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* struct bpf_spin_lock */ /* [2] */ @@ -166,23 +189,9 @@ static int load_btf(void) BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ }; - struct btf_header btf_hdr = { - .magic = BTF_MAGIC, - .version = BTF_VERSION, - .hdr_len = sizeof(struct btf_header), - .type_len = sizeof(btf_raw_types), - .str_off = 
sizeof(btf_raw_types), - .str_len = sizeof(btf_str_sec), - }; - __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) + - sizeof(btf_str_sec)]; - memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr)); - memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types)); - memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types), - btf_str_sec, sizeof(btf_str_sec)); - - return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0); + return libbpf__probe_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs)); } bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) @@ -222,7 +231,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) value_size = 8; max_entries = 0; map_flags = BPF_F_NO_PREALLOC; - btf_fd = load_btf(); + btf_fd = load_sk_storage_btf(); if (btf_fd < 0) return false; break; diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 41e8a689aa77..a877803e4ba8 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -32,3 +32,5 @@ test_tcpnotify_user test_libbpf test_tcp_check_syncookie_user alu32 +libbpf.pc +libbpf.so.* diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c index 8e6fcc8940f0..6f951d1ff0a4 100644 --- a/tools/testing/selftests/bpf/verifier/jump.c +++ b/tools/testing/selftests/bpf/verifier/jump.c @@ -178,3 +178,198 @@ .result_unpriv = REJECT, .result = ACCEPT, }, +{ + "jump test 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -20), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jump test 7", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_JMP_IMM(BPF_JA, 0, 0, -20), + }, + .result = ACCEPT, + .retval = 3, +}, +{ + "jump test 8", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + 
BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_JMP_IMM(BPF_JA, 0, 0, -20), + }, + .result = ACCEPT, + .retval = 3, +}, +{ + "jump/call test 9", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "jump out of range from insn 1 to 4", +}, +{ + "jump/call test 10", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "last insn is not an exit or jmp", +}, +{ + "jump/call test 11", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 26), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -31), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 3, +},
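
The new "jump test" cases above all follow the same pattern: jump forward over an exit, take a conditional branch across a run of filler instructions, then jump backwards into the skipped block. The sketch below is a minimal stand-alone illustration of how such a raw instruction sequence can be loaded and executed outside the test_verifier harness; it assumes the insn-building macros from tools/include/linux/filter.h and libbpf's bpf_load_program()/bpf_prog_test_run() wrappers, and is not part of the patch itself.

/* Minimal sketch, not the selftest harness: load a program with the same
 * shape as "jump test 6" (jump over an exit, branch, jump back) and run it
 * once against a dummy packet to read back its return value.
 */
#include <stdio.h>
#include <linux/bpf.h>
#include <linux/filter.h>	/* tools/include copy: BPF_MOV64_IMM() etc. */
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),			/* 0: r0 = 1    */
		BPF_MOV64_IMM(BPF_REG_1, 2),			/* 1: r1 = 2    */
		BPF_JMP_IMM(BPF_JA, 0, 0, 2),			/* 2: goto 5    */
		BPF_MOV64_IMM(BPF_REG_0, 2),			/* 3: r0 = 2    */
		BPF_EXIT_INSN(),				/* 4: return r0 */
		BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 1),	/* 5: goto 7    */
		BPF_EXIT_INSN(),				/* 6: not hit   */
		BPF_JMP_IMM(BPF_JA, 0, 0, -5),			/* 7: back to 3 */
	};
	char log[4096] = { 0 };
	char pkt[64] = { 0 };	/* dummy packet for BPF_PROG_TEST_RUN */
	__u32 retval = 0, duration = 0;
	int fd;

	fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
			      sizeof(insns) / sizeof(insns[0]), "GPL", 0,
			      log, sizeof(log));
	if (fd < 0) {
		fprintf(stderr, "verifier rejected program:\n%s\n", log);
		return 1;
	}
	if (!bpf_prog_test_run(fd, 1, pkt, sizeof(pkt), NULL, NULL,
			       &retval, &duration))
		printf("retval = %u\n", retval);	/* expect 2 */
	return 0;
}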
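
Similarly, the libbpf feature probes added above boil down to hand-assembling a tiny raw BTF image (header, type section, string section) and asking the kernel to load it. The stand-alone sketch below mirrors bpf_object__probe_btf_func(): it encodes "void x(int a)" with the macros from the new libbpf_internal.h and feeds the resulting blob to bpf_load_btf(); acceptance means the kernel understands BTF_KIND_FUNC/FUNC_PROTO. This is an illustrative sketch with minimal error handling, not code from the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/btf.h>
#include <bpf/bpf.h>
#include "libbpf_internal.h"	/* BTF_TYPE_ENC() and friends, added above */

int main(void)
{
	const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} -- same layout as bpf_object__probe_btf_func() */
	__u32 types[] = {
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),		     /* [1] int */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), /* [2] */
		BTF_PARAM_ENC(7, 1),					     /*     int a */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),	     /* [3] x */
	};
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(hdr),
		.type_len = sizeof(types),
		.str_off = sizeof(types),
		.str_len = sizeof(strs),
	};
	char raw[sizeof(hdr) + sizeof(types) + sizeof(strs)];
	int fd;

	/* raw BTF image = header | type section | string section */
	memcpy(raw, &hdr, sizeof(hdr));
	memcpy(raw + sizeof(hdr), types, sizeof(types));
	memcpy(raw + sizeof(hdr) + sizeof(types), strs, sizeof(strs));

	fd = bpf_load_btf(raw, sizeof(raw), NULL, 0, false);
	printf("BTF_KIND_FUNC support: %s\n", fd >= 0 ? "yes" : "no");
	if (fd >= 0)
		close(fd);
	return 0;
}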