Merge tag 'mlx5e-updates-2018-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-05-17

From: Or Gerlitz <ogerlitz@mellanox.com>

This series addresses a regression introduced by the shared-block TC
changes [1]. For VF->VF and uplink->VF rules, the TC core (cls_api)
currently attempts to offload the same flow into the driver multiple
times, as a side effect of mlx5's registration to the egdev callback.
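
To tell those two delivery paths apart, the driver now tags each
request with a direction flag. A minimal sketch, condensed from the
en_rep.c hunks in this merge (the ingress-block callback is identical
except that it passes MLX5E_TC_INGRESS):

  /* egdev callback registered per VF rep; sees rules whose egress
   * device is this rep (condensed: the offload-capability check is
   * omitted here)
   */
  static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type,
                                         void *type_data, void *cb_priv)
  {
          struct mlx5e_priv *priv = cb_priv;

          switch (type) {
          case TC_SETUP_CLSFLOWER:
                  return mlx5e_rep_setup_tc_cls_flower(priv, type_data,
                                                       MLX5E_TC_EGRESS);
          default:
                  return -EOPNOTSUPP;
          }
  }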

We use the flow cookie to detect and ignore such repeated offload
attempts. We cannot reject them (return an error), because that would
fail the offload attempt altogether.
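
Concretely, the add path now looks the flow up by its cookie first and
returns success on a duplicate rather than an error; simplified from
the check added in en_tc.c below:

  flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
  if (flow) {
          netdev_warn_once(priv->netdev,
                           "flow cookie %lx already exists, ignoring\n",
                           f->cookie);
          return 0;       /* ignore the duplicate, don't fail the offload */
  }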

The last patch of the series exposes HW stats counters through ethtool
for the vport reps.
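
After this change, ethtool -S on a vport rep reports four per-vport HW
counters in addition to the existing SW ones, per the descriptors added
in en_rep.c:

  static const struct counter_desc vport_rep_stats_desc[] = {
          { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
          { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
          { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
          { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
  };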

Dave - the regression we are addressing was introduced in 4.15 [1] and
applies to both nfp and mlx5. Jiri suggested pushing the driver-side
fixes through net-next; this has already been done for nfp [2][3]. Once
this series is upstream, we will submit a small, single-patch fix for
the TC core code, suitable for net and stable but not for net-next,
since it might limit some future use-cases.

[1] 208c0f4b52 ("net: sched: use tc_setup_cb_call to call per-block callbacks")
[2] c50647d ("nfp: flower: ignore duplicate cb requests for same rule")
[3] 54a4a03 ("nfp: flower: support offloading multiple rules with same cookie")
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d6830519a9
David S. Miller, 2018-05-18 13:00:43 -04:00
6 changed files with 199 additions and 83 deletions

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -634,7 +634,6 @@ struct mlx5e_flow_table {
struct mlx5e_tc_table {
struct mlx5_flow_table *t;
struct rhashtable_params ht_params;
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
@@ -1118,9 +1117,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -3136,22 +3136,23 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
struct tc_cls_flower_offload *cls_flower,
int flags)
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
return mlx5e_configure_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_DESTROY:
return mlx5e_delete_flower(priv, cls_flower);
return mlx5e_delete_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_STATS:
return mlx5e_stats_flower(priv, cls_flower);
return mlx5e_stats_flower(priv, cls_flower, flags);
default:
return -EOPNOTSUPP;
}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct mlx5e_priv *priv = cb_priv;
@@ -3160,7 +3161,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data);
return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
default:
return -EOPNOTSUPP;
}
@@ -4461,7 +4462,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_direct_tirs;
}
err = mlx5e_tc_init(priv);
err = mlx5e_tc_nic_init(priv);
if (err)
goto err_destroy_flow_steering;
@@ -4482,7 +4483,7 @@ err_destroy_indirect_rqts:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_tc_cleanup(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -66,18 +66,36 @@ static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
struct vport_stats {
u64 vport_rx_packets;
u64 vport_tx_packets;
u64 vport_rx_bytes;
u64 vport_tx_bytes;
};
static const struct counter_desc vport_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
static void mlx5e_rep_get_strings(struct net_device *dev,
u32 stringset, uint8_t *data)
{
int i;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
strcpy(data + (i * ETH_GSTRING_LEN),
sw_rep_stats_desc[i].format);
for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
strcpy(data + (i * ETH_GSTRING_LEN),
vport_rep_stats_desc[j].format);
break;
}
}
@@ -140,7 +158,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int i;
int i, j;
if (!data)
return;
@@ -148,18 +166,23 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_rep_update_sw_counters(priv);
mlx5e_rep_update_hw_counters(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i);
for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
vport_rep_stats_desc, j);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return NUM_VPORT_REP_COUNTERS;
return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
default:
return -EOPNOTSUPP;
}
@@ -723,15 +746,31 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
struct tc_cls_flower_offload *cls_flower, int flags)
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
return mlx5e_configure_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_DESTROY:
return mlx5e_delete_flower(priv, cls_flower);
return mlx5e_delete_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_STATS:
return mlx5e_stats_flower(priv, cls_flower);
return mlx5e_stats_flower(priv, cls_flower, flags);
default:
return -EOPNOTSUPP;
}
}
static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct mlx5e_priv *priv = cb_priv;
if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
default:
return -EOPNOTSUPP;
}
@@ -747,7 +786,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
default:
return -EOPNOTSUPP;
}
@@ -965,14 +1004,8 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
}
rpriv->vport_rx_rule = flow_rule;
err = mlx5e_tc_init(priv);
if (err)
goto err_del_flow_rule;
return 0;
err_del_flow_rule:
mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -984,7 +1017,6 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_tc_cleanup(priv);
mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
@@ -1042,8 +1074,15 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
if (err)
goto err_remove_sqs;
/* init shared tc flow table */
err = mlx5e_tc_esw_init(&rpriv->tc_ht);
if (err)
goto err_neigh_cleanup;
return 0;
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
mlx5e_remove_sqs_fwd_rules(priv);
return err;
@@ -1058,9 +1097,8 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
/* clean (and re-init) existing uplink offloaded TC rules */
mlx5e_tc_cleanup(priv);
mlx5e_tc_init(priv);
/* clean uplink offloaded TC rules, delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->tc_ht);
mlx5e_rep_neigh_cleanup(rpriv);
}
@@ -1107,7 +1145,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
upriv = netdev_priv(uplink_rpriv->netdev);
err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
if (err)
goto err_neigh_cleanup;
@@ -1122,7 +1160,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return 0;
err_egdev_cleanup:
tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
err_neigh_cleanup:
@@ -1151,7 +1189,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
REP_ETH);
upriv = netdev_priv(uplink_rpriv->netdev);
tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);

drivers/net/ethernet/mellanox/mlx5/core/en_rep.h

@@ -59,6 +59,7 @@ struct mlx5e_rep_priv {
struct net_device *netdev;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct rhashtable tc_ht; /* valid for uplink rep */
};
static inline

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -62,16 +62,21 @@ struct mlx5_nic_flow_attr {
struct mlx5_flow_table *hairpin_ft;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
MLX5E_TC_FLOW_HAIRPIN = BIT(3),
MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
};
struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
@@ -2077,6 +2082,20 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
return 0;
}
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
struct mlx5e_priv *peer_priv;
peer_priv = netdev_priv(peer_netdev);
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
same_hw_devs(priv, peer_priv) &&
MLX5_VPORT_MANAGER(peer_priv->mdev) &&
(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct net_device **out_dev,
@@ -2535,7 +2554,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
out_dev = tcf_mirred_dev(a);
if (switchdev_port_same_parent_id(priv->netdev,
out_dev)) {
out_dev) ||
is_merged_eswitch_dev(priv, out_dev)) {
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
@@ -2603,21 +2623,60 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
static void get_flags(int flags, u8 *flow_flags)
{
u8 __flow_flags = 0;
if (flags & MLX5E_TC_INGRESS)
__flow_flags |= MLX5E_TC_FLOW_INGRESS;
if (flags & MLX5E_TC_EGRESS)
__flow_flags |= MLX5E_TC_FLOW_EGRESS;
*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_flow, node),
.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
.automatic_shrinking = true,
};
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
return &uplink_rpriv->tc_ht;
} else
return &priv->fs.tc.ht;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
struct tc_cls_flower_offload *f, int flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
int attr_size, err = 0;
u8 flow_flags = 0;
get_flags(flags, &flow_flags);
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
if (flow) {
netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
return 0;
}
if (esw && esw->mode == SRIOV_OFFLOADS) {
flow_flags = MLX5E_TC_FLOW_ESWITCH;
flow_flags |= MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
} else {
flow_flags = MLX5E_TC_FLOW_NIC;
flow_flags |= MLX5E_TC_FLOW_NIC;
attr_size = sizeof(struct mlx5_nic_flow_attr);
}
@@ -2630,6 +2689,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow->cookie = f->cookie;
flow->flags = flow_flags;
flow->priv = priv;
err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
if (err < 0)
@@ -2660,8 +2720,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
!(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
kvfree(parse_attr);
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err) {
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
@@ -2675,18 +2734,28 @@ err_free:
return err;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_flow *flow;
struct mlx5e_tc_table *tc = &priv->fs.tc;
#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
if (!flow)
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
return true;
return false;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
mlx5e_tc_del_flow(priv, flow);
@@ -2696,18 +2765,17 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
}
int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
struct tc_cls_flower_offload *f, int flags)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 bytes;
u64 packets;
u64 lastuse;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
if (!flow)
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
@@ -2724,41 +2792,43 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_flow, node),
.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
.automatic_shrinking = true,
};
int mlx5e_tc_init(struct mlx5e_priv *priv)
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
return rhashtable_init(&tc->ht, &tc_ht_params);
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = arg;
struct mlx5e_priv *priv = flow->priv;
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
}
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
tc->t = NULL;
}
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
return rhashtable_init(tc_ht, &tc_ht_params);
}
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

drivers/net/ethernet/mellanox/mlx5/core/en_tc.h

@@ -38,16 +38,26 @@
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_tc_init(struct mlx5e_priv *priv);
void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
enum {
MLX5E_TC_INGRESS = BIT(0),
MLX5E_TC_EGRESS = BIT(1),
MLX5E_TC_LAST_EXPORTED_BIT = 1,
};
int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
struct tc_cls_flower_offload *f, int flags);
int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
struct tc_cls_flower_offload *f, int flags);
int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
struct tc_cls_flower_offload *f, int flags);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -64,8 +74,8 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
}
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; }
#endif