Merge branch 'mlx4-vf-counters'

Or Gerlitz says:

====================
mlx4 driver update (+ new VF ndo)

This series from Eran and Hadar continues the work on traffic
counters in the mlx4 driver, this time mostly around SRIOV.

We added a new ndo to read the VF counters through the PF netdev
netlink infrastructure, plus an mlx4 implementation of that ndo.

Changes from V0:
  - applied feedback from John to use nested netlink encoding
    for the VF counters, so the encoding can be extended later
  - added handling of single-ported VFs in the new mlx4_en ndo
  - avoided truncating the FW counters from 64 to 32 bits in the
    mlx4_en PF flow
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-06-15 17:23:03 -07:00
commit 916035ddd3
20 changed files with 587 additions and 70 deletions
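
As a rough sketch of what the nested encoding buys on the user-space side: each VF's counters arrive as u64 attributes inside an IFLA_VF_STATS nest within its IFLA_VF_INFO block, so a parser can skip attribute types it does not recognize and the nest can grow later. The helper below is hypothetical (not part of this series); it assumes rta already points at an IFLA_VF_STATS attribute and uses only the standard RTA_* macros from <linux/rtnetlink.h>.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

/* Hypothetical helper: walk one IFLA_VF_STATS nest and print the
 * counters this series defines, ignoring any types added later. */
static void dump_vf_stats(struct rtattr *rta)
{
	struct rtattr *pos;
	int len = RTA_PAYLOAD(rta);

	for (pos = RTA_DATA(rta); RTA_OK(pos, len); pos = RTA_NEXT(pos, len)) {
		uint64_t val;

		/* netlink attributes are only 4-byte aligned; copy out */
		memcpy(&val, RTA_DATA(pos), sizeof(val));
		switch (pos->rta_type) {
		case IFLA_VF_STATS_RX_PACKETS:
			printf("rx_packets: %llu\n", (unsigned long long)val);
			break;
		case IFLA_VF_STATS_TX_PACKETS:
			printf("tx_packets: %llu\n", (unsigned long long)val);
			break;
		case IFLA_VF_STATS_RX_BYTES:
			printf("rx_bytes: %llu\n", (unsigned long long)val);
			break;
		case IFLA_VF_STATS_TX_BYTES:
			printf("tx_bytes: %llu\n", (unsigned long long)val);
			break;
		default:
			break;	/* unknown/future attribute: skip */
		}
	}
}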

drivers/infiniband/hw/mlx4/mad.c

@ -64,14 +64,6 @@ enum {
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
/* Counters should saturate once they reach their maximum value */
#define ASSIGN_32BIT_COUNTER(counter, value) do {\
if ((value) > U32_MAX) \
counter = cpu_to_be32(U32_MAX); \
else \
counter = cpu_to_be32(value); \
} while (0)
struct mlx4_mad_rcv_buf {
struct ib_grh grh;
u8 payload[256];
@ -828,31 +820,25 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int err;
u32 inmod = dev->counters[port_num - 1] & 0xffff;
u8 mode;
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return -EINVAL;
mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
if (IS_ERR(mailbox))
return IB_MAD_RESULT_FAILURE;
err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_WRAPPED);
memset(&counter_stats, 0, sizeof(counter_stats));
err = mlx4_get_counter_stats(dev->dev,
dev->counters[port_num - 1].index,
&counter_stats, 0);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
memset(out_mad->data, 0, sizeof out_mad->data);
mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
switch (mode & 0xf) {
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(mailbox->buf,
(void *)(out_mad->data + 40));
edit_counter(&counter_stats,
(void *)(out_mad->data + 40));
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
break;
default:
@ -860,8 +846,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
}
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
}
@ -869,10 +853,12 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
if (!mlx4_is_slave(dev->dev))
return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
case IB_LINK_LAYER_ETHERNET:
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);

drivers/infiniband/hw/mlx4/main.c

@ -2098,6 +2098,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
struct mlx4_ib_iboe *iboe;
int ib_num_ports = 0;
int num_req_counters;
int allocated;
u32 counter_index;
pr_info_once("%s", mlx4_ib_version);
@ -2263,19 +2265,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
for (i = 0; i < num_req_counters; ++i) {
mutex_init(&ibdev->qp1_proxy_lock[i]);
allocated = 0;
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
err = mlx4_counter_alloc(ibdev->dev, &counter_index);
/* if we failed to allocate a new counter, use the default */
if (err)
ibdev->counters[i] = -1;
} else {
ibdev->counters[i] = -1;
counter_index =
mlx4_get_default_counter_index(dev,
i + 1);
else
allocated = 1;
} else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
counter_index = mlx4_get_default_counter_index(dev,
i + 1);
}
ibdev->counters[i].index = counter_index;
ibdev->counters[i].allocated = allocated;
pr_info("counter index %d for port %d allocated %d\n",
counter_index, i + 1, allocated);
}
if (mlx4_is_bonded(dev))
for (i = 1; i < ibdev->num_ports ; ++i)
ibdev->counters[i] = ibdev->counters[0];
for (i = 1; i < ibdev->num_ports ; ++i) {
ibdev->counters[i].index = ibdev->counters[0].index;
ibdev->counters[i].allocated = 0;
}
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
@ -2415,10 +2429,12 @@ err_steer_qp_release:
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
err_counter:
for (; i; --i)
if (ibdev->counters[i - 1] != -1)
mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
for (i = 0; i < ibdev->num_ports; ++i) {
if (ibdev->counters[i].index != -1 &&
ibdev->counters[i].allocated)
mlx4_counter_free(ibdev->dev,
ibdev->counters[i].index);
}
err_map:
iounmap(ibdev->uar_map);
@ -2535,8 +2551,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
if (ibdev->counters[p] != -1)
mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
if (ibdev->counters[p].index != -1 &&
ibdev->counters[p].allocated)
mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);

drivers/infiniband/hw/mlx4/mlx4_ib.h

@ -503,6 +503,11 @@ struct mlx4_ib_iov_port {
struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};
struct counter_index {
u32 index;
u8 allocated;
};
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@ -521,7 +526,7 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
struct mlx4_ib_iboe iboe;
int counters[MLX4_MAX_PORTS];
struct counter_index counters[MLX4_MAX_PORTS];
int *eq_table;
struct kobject *iov_parent;
struct kobject *ports_parent;

drivers/infiniband/hw/mlx4/qp.c

@ -1539,12 +1539,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
if (dev->counters[qp->port - 1] != -1) {
if (dev->counters[qp->port - 1].index != -1) {
context->pri_path.counter_index =
dev->counters[qp->port - 1];
dev->counters[qp->port - 1].index;
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
context->pri_path.counter_index = 0xff;
context->pri_path.counter_index =
MLX4_SINK_COUNTER_INDEX(dev->dev);
if (qp->flags & MLX4_IB_QP_NETIF) {
mlx4_ib_steer_qp_reg(dev, qp, 1);

drivers/net/ethernet/mellanox/mlx4/cmd.c

@ -49,6 +49,7 @@
#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL
@ -3166,6 +3167,92 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
struct mlx4_counter *counter_stats, int reset)
{
struct mlx4_cmd_mailbox *mailbox = NULL;
struct mlx4_counter *tmp_counter;
int err;
u32 if_stat_in_mod;
if (!counter_stats)
return -EINVAL;
if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
if_stat_in_mod = counter_index;
if (reset)
if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
err = mlx4_cmd_box(dev, 0, mailbox->dma,
if_stat_in_mod, 0,
MLX4_CMD_QUERY_IF_STAT,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
__func__, counter_index);
goto if_stat_out;
}
tmp_counter = (struct mlx4_counter *)mailbox->buf;
counter_stats->counter_mode = tmp_counter->counter_mode;
if (counter_stats->counter_mode == 0) {
counter_stats->rx_frames =
cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
be64_to_cpu(tmp_counter->rx_frames));
counter_stats->tx_frames =
cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
be64_to_cpu(tmp_counter->tx_frames));
counter_stats->rx_bytes =
cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
be64_to_cpu(tmp_counter->rx_bytes));
counter_stats->tx_bytes =
cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
be64_to_cpu(tmp_counter->tx_bytes));
}
if_stat_out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
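
One non-obvious property of mlx4_get_counter_stats() above: on the mode-0 path it adds the firmware values into whatever counter_stats already holds (keeping big-endian storage) rather than overwriting it. A hypothetical caller, with made-up counter indices, can therefore sum several counters into a single struct; mlx4_calc_vf_counters() in the resource tracker relies on exactly this to aggregate all counters a VF owns on a port:

	/* sketch only; dev is a struct mlx4_dev * from the caller's context */
	struct mlx4_counter sum;
	int indices[] = { 2, 5 };	/* hypothetical counter indices */
	int k, err = 0;

	memset(&sum, 0, sizeof(sum));
	for (k = 0; k < 2 && !err; k++)	/* accumulates into sum */
		err = mlx4_get_counter_stats(dev, indices[k], &sum, 0);
	/* on success, sum.rx_frames etc. hold be64 totals of both counters */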
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats)
{
struct mlx4_counter tmp_vf_stats;
int slave;
int err = 0;
if (!vf_stats)
return -EINVAL;
if (!mlx4_is_master(dev))
return -EPROTONOSUPPORT;
slave = mlx4_get_slave_indx(dev, vf_idx);
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
if (!err && tmp_vf_stats.counter_mode == 0) {
vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
}
return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);

drivers/net/ethernet/mellanox/mlx4/en_ethtool.c

@ -119,6 +119,12 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* pf statistics */
"pf_rx_packets",
"pf_rx_bytes",
"pf_tx_packets",
"pf_tx_bytes",
/* priority flow control statistics rx */
"rx_pause_prio_0", "rx_pause_duration_prio_0",
"rx_pause_transition_prio_0",
@ -368,6 +374,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->port_stats)[i];
for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] =
((unsigned long *)&priv->pf_stats)[i];
for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@ -448,6 +459,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_PF_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))

drivers/net/ethernet/mellanox/mlx4/en_netdev.c

@ -1597,6 +1597,9 @@ int mlx4_en_start_port(struct net_device *dev)
}
mdev->mac_removed[priv->port] = 0;
priv->counter_index =
mlx4_get_default_counter_index(mdev->dev, priv->port);
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
@ -1755,6 +1758,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Set port as not active */
priv->port_up = false;
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promiscuous mode */
if (mdev->dev->caps.steering_mode ==
@ -1891,6 +1895,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
sizeof(priv->rx_priority_flowstats));
memset(&priv->tx_priority_flowstats, 0,
sizeof(priv->tx_priority_flowstats));
memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i]->bytes = 0;
@ -2287,6 +2292,15 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@ -2484,6 +2498,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
@ -2681,7 +2696,7 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
if (!mlx4_is_slave(dev) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
@ -2743,6 +2758,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
last_i += NUM_PORT_STATS;
if (mlx4_is_master(dev))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_PF_STATS);
last_i += NUM_PF_STATS;
mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
rx_ppp, rx_pause,
tx_ppp, tx_pause);
@ -2778,6 +2798,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);

drivers/net/ethernet/mellanox/mlx4/en_port.c

@ -149,6 +149,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
@ -156,7 +157,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
int i;
int i, counter_index;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@ -296,6 +297,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_unlock_bh(&priv->stats_lock);
memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
counter_index = mlx4_get_default_counter_index(mdev->dev, port);
err = mlx4_get_counter_stats(mdev->dev, counter_index,
&tmp_counter_stats, reset);
/* 0xffs indicates invalid value */
memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
@ -314,6 +320,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
if (tmp_counter_stats.counter_mode == 0) {
priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
}
for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->rx_priority_flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);

drivers/net/ethernet/mellanox/mlx4/en_resources.c

@ -66,7 +66,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
context->pri_path.counter_index = 0xff;
context->pri_path.counter_index = priv->counter_index;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);

drivers/net/ethernet/mellanox/mlx4/main.c

@ -479,7 +479,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
@ -2193,20 +2193,73 @@ err_free_icm:
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int nent;
int nent_pow2;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
nent = dev->caps.max_counters;
return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
if (!dev->caps.max_counters)
return -ENOSPC;
nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
/* reserve last counter index for sink counter */
return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
nent_pow2 - 1, 0,
nent_pow2 - dev->caps.max_counters + 1);
}
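
To make the sizing above concrete (the trailing arguments of mlx4_bitmap_init() are the reserved-bottom and reserved-top counts), here is the arithmetic with a hypothetical capability value:

/* Hypothetical example: dev->caps.max_counters = 100
 *   nent_pow2    = roundup_pow_of_two(100) = 128
 *   mask         = nent_pow2 - 1           = 127
 *   reserved_top = 128 - 100 + 1           = 29
 * The bitmap then only hands out indices 0..98; index 99, which is
 * max_counters - 1 == MLX4_SINK_COUNTER_INDEX(dev), stays reserved
 * as the sink counter, along with the 100..127 power-of-two padding.
 */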
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return;
if (!dev->caps.max_counters)
return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int port;
for (port = 0; port < dev->caps.num_ports; port++)
if (priv->def_counter[port] != -1)
mlx4_counter_free(dev, priv->def_counter[port]);
}
static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int port, err = 0;
u32 idx;
for (port = 0; port < dev->caps.num_ports; port++)
priv->def_counter[port] = -1;
for (port = 0; port < dev->caps.num_ports; port++) {
err = mlx4_counter_alloc(dev, &idx);
if (!err || err == -ENOSPC) {
priv->def_counter[port] = idx;
} else if (err == -ENOENT) {
err = 0;
continue;
} else {
mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
__func__, port + 1, err);
mlx4_cleanup_default_counters(dev);
return err;
}
mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
__func__, priv->def_counter[port], port + 1);
}
return err;
}
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@ -2215,8 +2268,10 @@ int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
return -ENOENT;
*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
if (*idx == -1)
return -ENOMEM;
if (*idx == -1) {
*idx = MLX4_SINK_COUNTER_INDEX(dev);
return -ENOSPC;
}
return 0;
}
@ -2239,8 +2294,35 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
u8 counter_index)
{
struct mlx4_cmd_mailbox *if_stat_mailbox;
int err;
u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(if_stat_mailbox))
return PTR_ERR(if_stat_mailbox);
err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
return err;
}
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return;
if (idx == MLX4_SINK_COUNTER_INDEX(dev))
return;
__mlx4_clear_if_stat(dev, idx);
mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@ -2260,6 +2342,14 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return priv->def_counter[port - 1];
}
EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@ -2395,10 +2485,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
if (!mlx4_is_slave(dev)) {
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
}
}
err = mlx4_allocate_default_counters(dev);
if (err) {
mlx4_err(dev, "Failed to allocate default counters, aborting\n");
goto err_counters_table_free;
}
if (!mlx4_is_slave(dev)) {
@ -2432,15 +2530,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
goto err_counters_table_free;
goto err_default_counters_free;
}
}
}
return 0;
err_default_counters_free:
mlx4_cleanup_default_counters(dev);
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
if (!mlx4_is_slave(dev))
mlx4_cleanup_counters_table(dev);
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@ -3173,7 +3275,9 @@ err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_counters_table(dev);
mlx4_cleanup_default_counters(dev);
if (!mlx4_is_slave(dev))
mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
@ -3471,7 +3575,9 @@ static void mlx4_unload_one(struct pci_dev *pdev)
mlx4_free_resource_tracker(dev,
RES_TR_FREE_SLAVES_ONLY);
mlx4_cleanup_counters_table(dev);
mlx4_cleanup_default_counters(dev);
if (!mlx4_is_slave(dev))
mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);

drivers/net/ethernet/mellanox/mlx4/mlx4.h

@ -65,6 +65,8 @@
#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
#define MLX4_QUERY_IF_STAT_RESET BIT(31)
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
@ -874,6 +876,7 @@ struct mlx4_priv {
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
struct mlx4_bitmap counters_bitmap;
int def_counter[MLX4_MAX_PORTS];
struct mlx4_catas_err catas_err;
@ -1007,6 +1010,8 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
struct mlx4_counter *data);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

@ -566,6 +566,7 @@ struct mlx4_en_priv {
#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_counter_stats pf_stats;
struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_rx rx_flowstats;
@ -582,6 +583,7 @@ struct mlx4_en_priv {
int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
struct hwtstamp_config hwtstamp_config;
u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;

drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h

@ -23,6 +23,14 @@ struct mlx4_en_pkt_stats {
#define NUM_PKT_STATS 43
};
struct mlx4_en_counter_stats {
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
#define NUM_PF_STATS 4
};
struct mlx4_en_port_stats {
unsigned long tso_packets;
unsigned long xmit_more;
@ -71,7 +79,8 @@ struct mlx4_en_flow_stats_tx {
#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_RX)
NUM_FLOW_PRIORITY_STATS_RX + \
NUM_PF_STATS)
struct mlx4_en_stat_out_flow_control_mbox {
/* Total number of PAUSE frames received from the far-end port */

drivers/net/ethernet/mellanox/mlx4/resource_tracker.c

@ -46,8 +46,11 @@
#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT 2
#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
@ -459,11 +462,21 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
/* deduct the sink counter */
return (dev->caps.max_counters - 1 -
(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
/ MLX4_MAX_PORTS;
}
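
Worked through with hypothetical numbers, the guarantee split computed above and consumed in the RES_COUNTER case below comes out as:

/* Hypothetical example: dev->caps.max_counters = 128, MLX4_MAX_PORTS = 2
 *   sink counter                          : 1
 *   PF guarantee  (2 per port * 2 ports)  : 4
 *   budget left for VF guarantees         : 128 - 1 - 4 = 123
 *   per-VF guarantee (1 per port * 2)     : 2
 *   => (128 - 1 - 4) / 2 = 61 VFs can be guaranteed a counter; any
 *      further VFs get guaranteed[t] = 0 and allocate best-effort
 *      from the shared res_free pool.
 */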
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
@ -499,6 +512,9 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
res_alloc->allocated = kzalloc((dev->persist->
num_vfs + 1) *
sizeof(int), GFP_KERNEL);
/* the sink counter is excluded from the free pool */
if (i == RES_COUNTER)
res_alloc->res_free = dev->caps.max_counters - 1;
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
@ -577,9 +593,17 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
res_alloc->guaranteed[t] = 0;
if (t == mlx4_master_func_num(dev))
res_alloc->res_free = res_alloc->quota[t];
res_alloc->guaranteed[t] =
MLX4_PF_COUNTERS_PER_PORT *
MLX4_MAX_PORTS;
else if (t <= max_vfs_guarantee_counter)
res_alloc->guaranteed[t] =
MLX4_VF_COUNTERS_PER_PORT *
MLX4_MAX_PORTS;
else
res_alloc->guaranteed[t] = 0;
res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
@ -700,6 +724,9 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
}
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
u8 slave, int port);
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
@ -715,6 +742,10 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
err = handle_counter(dev, qpc, slave, port);
if (err)
goto out;
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
@ -859,6 +890,83 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_unlock_irq(mlx4_tlock(dev));
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port);
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
int counter_index)
{
struct res_common *r;
struct res_counter *counter;
int ret = 0;
if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
return ret;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, counter_index, RES_COUNTER);
if (!r || r->owner != slave) {
ret = -EINVAL;
} else {
counter = container_of(r, struct res_counter, com);
if (!counter->port)
counter->port = port;
}
spin_unlock_irq(mlx4_tlock(dev));
return ret;
}
static int handle_unexisting_counter(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc, u8 slave,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *tmp;
struct res_counter *counter;
u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(tmp,
&tracker->slave_list[slave].res_list[RES_COUNTER],
list) {
counter = container_of(tmp, struct res_counter, com);
if (port == counter->port) {
qpc->pri_path.counter_index = counter->com.res_id;
spin_unlock_irq(mlx4_tlock(dev));
return 0;
}
}
spin_unlock_irq(mlx4_tlock(dev));
/* No existing counter, need to allocate a new counter */
err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
port);
if (err == -ENOENT) {
err = 0;
} else if (err && err != -ENOSPC) {
mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
__func__, slave, err);
} else {
qpc->pri_path.counter_index = counter_idx;
mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
__func__, slave, qpc->pri_path.counter_index);
err = 0;
}
return err;
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
u8 slave, int port)
{
if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
return handle_existing_counter(dev, slave, port,
qpc->pri_path.counter_index);
return handle_unexisting_counter(dev, qpc, slave, port);
}
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
@ -952,7 +1060,7 @@ static struct res_common *alloc_srq_tr(int id)
return &ret->com;
}
static struct res_common *alloc_counter_tr(int id)
static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
@ -962,6 +1070,7 @@ static struct res_common *alloc_counter_tr(int id)
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
ret->port = port;
return &ret->com;
}
@ -1022,7 +1131,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
ret = alloc_counter_tr(id);
ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
@ -1039,6 +1148,53 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
struct mlx4_counter *data)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *tmp;
struct res_counter *counter;
int *counters_arr;
int i = 0, err = 0;
memset(data, 0, sizeof(*data));
counters_arr = kmalloc_array(dev->caps.max_counters,
sizeof(*counters_arr), GFP_KERNEL);
if (!counters_arr)
return -ENOMEM;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(tmp,
&tracker->slave_list[slave].res_list[RES_COUNTER],
list) {
counter = container_of(tmp, struct res_counter, com);
if (counter->port == port) {
counters_arr[i] = (int)tmp->res_id;
i++;
}
}
spin_unlock_irq(mlx4_tlock(dev));
counters_arr[i] = -1;
i = 0;
while (counters_arr[i] != -1) {
err = mlx4_get_counter_stats(dev, counters_arr[i], data,
0);
if (err) {
memset(data, 0, sizeof(*data));
goto table_changed;
}
i++;
}
table_changed:
kfree(counters_arr);
return 0;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
@ -2001,7 +2157,7 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
@ -2019,7 +2175,7 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
if (err) {
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
@ -2101,7 +2257,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
@ -2335,6 +2491,9 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return -EINVAL;
index = get_param_l(&in_param);
if (index == MLX4_SINK_COUNTER_INDEX(dev))
return 0;
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;

include/linux/if_link.h

@ -5,6 +5,15 @@
/* We don't want this structure exposed to user space */
struct ifla_vf_stats {
__u64 rx_packets;
__u64 tx_packets;
__u64 rx_bytes;
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
};
struct ifla_vf_info {
__u32 vf;
__u8 mac[32];

include/linux/mlx4/cmd.h

@ -35,6 +35,8 @@
#include <linux/dma-mapping.h>
#include <linux/if_link.h>
#include <linux/mlx4/device.h>
#include <linux/netdevice.h>
enum {
/* initialization and general commands */
@ -300,6 +302,10 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
struct mlx4_counter *counter_stats, int reset);
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);

include/linux/mlx4/device.h

@ -771,6 +771,14 @@ union mlx4_ext_av {
struct mlx4_eth_av eth;
};
/* Counters should saturate once they reach their maximum value */
#define ASSIGN_32BIT_COUNTER(counter, value) do { \
if ((value) > U32_MAX) \
counter = cpu_to_be32(U32_MAX); \
else \
counter = cpu_to_be32(value); \
} while (0)
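
A quick illustration of the saturating assignment, with a hypothetical firmware value:

	u64 frames = 0x100000005ULL;	/* > U32_MAX */
	__be32 out;

	/* a plain 32-bit cast would wrap to 5; the macro pins the
	 * value at U32_MAX instead */
	ASSIGN_32BIT_COUNTER(out, frames);	/* out == cpu_to_be32(U32_MAX) */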
struct mlx4_counter {
u8 reserved1[3];
u8 counter_mode;
@ -957,6 +965,7 @@ struct mlx4_mad_ifc {
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
#define MLX4_INVALID_SLAVE_ID 0xFF
#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
void handle_port_mgmt_change_event(struct work_struct *work);
@ -1347,6 +1356,7 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
int port);

include/linux/netdevice.h

@ -1100,6 +1100,10 @@ struct net_device_ops {
struct ifla_vf_info *ivf);
int (*ndo_set_vf_link_state)(struct net_device *dev,
int vf, int link_state);
int (*ndo_get_vf_stats)(struct net_device *dev,
int vf,
struct ifla_vf_stats
*vf_stats);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);

include/uapi/linux/if_link.h

@ -484,6 +484,7 @@ enum {
IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
* on/off switch
*/
IFLA_VF_STATS, /* network device statistics */
__IFLA_VF_MAX,
};
@ -533,6 +534,18 @@ struct ifla_vf_rss_query_en {
__u32 setting;
};
enum {
IFLA_VF_STATS_RX_PACKETS,
IFLA_VF_STATS_TX_PACKETS,
IFLA_VF_STATS_RX_BYTES,
IFLA_VF_STATS_TX_BYTES,
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
__IFLA_VF_STATS_MAX,
};
#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
/* VF ports management section
*
* Nested layout of set/get msg is:

net/core/rtnetlink.c

@ -819,7 +819,19 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
nla_total_size(sizeof(struct ifla_vf_rate)) +
nla_total_size(sizeof(struct ifla_vf_link_state)) +
nla_total_size(sizeof(struct ifla_vf_rss_query_en)));
nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
/* IFLA_VF_STATS_RX_PACKETS */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_PACKETS */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_RX_BYTES */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_BYTES */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_BROADCAST */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */
nla_total_size(sizeof(__u64)));
return size;
} else
return 0;
@ -1123,7 +1135,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
&& (ext_filter_mask & RTEXT_FILTER_VF)) {
int i;
struct nlattr *vfinfo, *vf;
struct nlattr *vfinfo, *vf, *vfstats;
int num_vfs = dev_num_vf(dev->dev.parent);
vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
@ -1138,6 +1150,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct ifla_vf_spoofchk vf_spoofchk;
struct ifla_vf_link_state vf_linkstate;
struct ifla_vf_rss_query_en vf_rss_query_en;
struct ifla_vf_stats vf_stats;
/*
* Not all SR-IOV capable drivers support the
@ -1190,6 +1203,30 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
sizeof(vf_rss_query_en),
&vf_rss_query_en))
goto nla_put_failure;
memset(&vf_stats, 0, sizeof(vf_stats));
if (dev->netdev_ops->ndo_get_vf_stats)
dev->netdev_ops->ndo_get_vf_stats(dev, i,
&vf_stats);
vfstats = nla_nest_start(skb, IFLA_VF_STATS);
if (!vfstats) {
nla_nest_cancel(skb, vf);
nla_nest_cancel(skb, vfinfo);
goto nla_put_failure;
}
if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
vf_stats.rx_packets) ||
nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
vf_stats.tx_packets) ||
nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
vf_stats.rx_bytes) ||
nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
vf_stats.tx_bytes) ||
nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast) ||
nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
vf_stats.multicast))
goto nla_put_failure;
nla_nest_end(skb, vfstats);
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
@ -1303,6 +1340,16 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
[IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
[IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
[IFLA_VF_STATS] = { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
[IFLA_VF_STATS_RX_PACKETS] = { .type = NLA_U64 },
[IFLA_VF_STATS_TX_PACKETS] = { .type = NLA_U64 },
[IFLA_VF_STATS_RX_BYTES] = { .type = NLA_U64 },
[IFLA_VF_STATS_TX_BYTES] = { .type = NLA_U64 },
[IFLA_VF_STATS_BROADCAST] = { .type = NLA_U64 },
[IFLA_VF_STATS_MULTICAST] = { .type = NLA_U64 },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {