Merge branch 'gred-add-offload-support'

Jakub Kicinski says:

====================
gred: add offload support

This series adds support for GRED offload in the nfp driver.  So
far we have only supported the RED Qdisc offload, but we need a
way to differentiate traffic types e.g. based on DSCP marking.

It may seem like PRIO+RED is a good match for this job, however,
(a) we don't need strict priority behaviour of PRIO, and (b) PRIO
uses the legacy way of mapping ToS fields to bands, which is quite
awkward and limiting.

The less commonly used GRED Qdisc is a better match for the scenario,
it allows multiple sets of RED parameters and queue lengths to be
maintained with a single FIFO queue.  This is exactly how nfp offload
behaves.  We use a trivial u32 classifier to assign packets to virtual
queues.

There is also the minor advantage that GRED can't have its child
changed, therefore limiting ways in which the configuration of SW
path can diverge from HW offload.

Last patch of the series adds support for (G)RED in non-ECN mode,
where packets are dropped instead of marked.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2018-11-19 18:53:46 -08:00
commit 6133e78f41
13 changed files with 1041 additions and 115 deletions

View File

@ -56,6 +56,7 @@ endif
ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
nfp-objs += \
abm/cls.o \
abm/ctrl.o \
abm/qdisc.o \
abm/main.o

View File

@ -0,0 +1,283 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_net_repr.h"
#include "main.h"
struct nfp_abm_u32_match {
u32 handle;
u32 band;
u8 mask;
u8 val;
struct list_head list;
};
/* Check whether a u32 filter knode can be offloaded to the NFP.
 *
 * The FW only supports a minimal u32 subset: a single terminal key
 * matching the high DSCP class-selector bits of the IPv4 ToS / IPv6
 * Traffic Class field, classifying packets into a band (classid)
 * below @abm->num_bands.  Every unsupported u32 feature is rejected
 * with an extack message explaining why.
 *
 * Return: true if the knode is offloadable, false otherwise.
 */
static bool
nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
			__be16 proto, struct netlink_ext_ack *extack)
{
	struct tc_u32_key *k;
	unsigned int tos_off;

	/* Actions, links, hashing, mark matching etc. cannot be expressed
	 * in the FW's simple DSCP -> band map.
	 */
	if (knode->exts && tcf_exts_has_actions(knode->exts)) {
		NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
		return false;
	}
	if (knode->link_handle) {
		NL_SET_ERR_MSG_MOD(extack, "linking not supported");
		return false;
	}
	if (knode->sel->flags != TC_U32_TERMINAL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flags must be equal to TC_U32_TERMINAL");
		return false;
	}
	if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
	    knode->sel->offoff || knode->fshift) {
		/* typo fixed in user-visible message: "offseting" */
		NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
		return false;
	}
	if (knode->sel->hoff || knode->sel->hmask) {
		NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
		return false;
	}
	if (knode->val || knode->mask) {
		NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
		return false;
	}
	if (knode->res && knode->res->class) {
		NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
		return false;
	}
	if (knode->res && knode->res->classid >= abm->num_bands) {
		NL_SET_ERR_MSG_MOD(extack,
				   "classid higher than number of bands");
		return false;
	}
	if (knode->sel->nkeys != 1) {
		NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
		return false;
	}

	/* Bit offset of the ToS / Traffic Class byte within the first
	 * 32-bit header word, as seen after be32_to_cpu().
	 */
	switch (proto) {
	case htons(ETH_P_IP):
		tos_off = 16;
		break;
	case htons(ETH_P_IPV6):
		tos_off = 20;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
		return false;
	}

	k = &knode->sel->keys[0];
	if (k->offmask) {
		/* typo fixed in user-visible message: "offseting" */
		NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
		return false;
	}
	if (k->off) {
		NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
		return false;
	}
	if (k->val & ~k->mask) {
		NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
		return false;
	}
	/* FW can only match the high DSCP bits given by @abm->dscp_mask */
	if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
		NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
		nfp_err(abm->app->cpp,
			"u32 offload: requested mask %x FW can support only %x\n",
			be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
		return false;
	}

	return true;
}
/* This filter list -> map conversion is O(n * m), we expect single digit or
* low double digit number of prios and likewise for the filters. Also u32
* doesn't report stats, so it's really only setup time cost.
*/
static unsigned int
nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
{
struct nfp_abm_u32_match *iter;
list_for_each_entry(iter, &alink->dscp_map, list)
if ((prio & iter->mask) == iter->val)
return iter->band;
return alink->def_band;
}
/* Recompute the packed FW priority -> band map from the offloaded u32
 * filters and push it to the device.
 */
static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
{
	unsigned int i, bits_per_prio, prios_per_word, base_shift;
	struct nfp_abm *abm = alink->abm;
	u32 field_mask;

	/* Map counts as valid as soon as any filter is offloaded */
	alink->has_prio = !list_empty(&alink->dscp_map);

	/* Each priority occupies a power-of-2 number of bits in the map,
	 * enough to encode any band index below abm->num_bands.
	 */
	bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
	field_mask = (1 << bits_per_prio) - 1;
	prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;

	/* FW mask applies from top bits */
	base_shift = 8 - order_base_2(abm->num_prios);

	for (i = 0; i < abm->num_prios; i++) {
		unsigned int offset;
		u32 *word;
		u8 band;

		/* Locate priority i's slot in the packed prio_map */
		word = &alink->prio_map[i / prios_per_word];
		offset = (i % prios_per_word) * bits_per_prio;
		/* i << base_shift reconstructs the DSCP value the FW
		 * will see after applying its top-bits mask
		 */
		band = nfp_abm_find_band_for_prio(alink, i << base_shift);

		/* Clear the slot, then write the band index into it */
		*word &= ~(field_mask << offset);
		*word |= band << offset;
	}

	/* Qdisc offload status may change if has_prio changed */
	nfp_abm_qdisc_offload_update(alink);

	return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
}
static void
nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
struct tc_cls_u32_knode *knode)
{
struct nfp_abm_u32_match *iter;
list_for_each_entry(iter, &alink->dscp_map, list)
if (iter->handle == knode->handle) {
list_del(&iter->list);
kfree(iter);
nfp_abm_update_band_map(alink);
return;
}
}
/* Install or update the DSCP -> band mapping for a u32 knode.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EOPNOTSUPP for
 * any unsupported or conflicting filter (in which case any previous
 * mapping for this handle is removed so SW and HW state stay in sync).
 */
static int
nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
			  struct tc_cls_u32_knode *knode,
			  __be16 proto, struct netlink_ext_ack *extack)
{
	struct nfp_abm_u32_match *match = NULL, *iter;
	unsigned int tos_off;
	u8 mask, val;
	int err;

	if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
		goto err_delete;

	/* Same protocol-dependent byte offset as used in check_knode() */
	tos_off = proto == htons(ETH_P_IP) ? 16 : 20;

	/* Extract the DSCP Class Selector bits */
	val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
	mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;

	/* Check if there is no conflicting mapping and find match by handle */
	list_for_each_entry(iter, &alink->dscp_map, list) {
		u32 cmask;

		if (iter->handle == knode->handle) {
			/* Same handle - this is a replace, reuse the entry */
			match = iter;
			continue;
		}
		cmask = iter->mask & mask;
		/* Overlapping match patterns mapping to different bands
		 * would make the resulting band map ambiguous.
		 */
		if ((iter->val & cmask) == (val & cmask) &&
		    iter->band != knode->res->classid) {
			NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
			goto err_delete;
		}
	}

	if (!match) {
		/* New handle - allocate and link a fresh entry */
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			return -ENOMEM;
		list_add(&match->list, &alink->dscp_map);
	}
	match->handle = knode->handle;
	match->band = knode->res->classid;
	match->mask = mask;
	match->val = val;

	err = nfp_abm_update_band_map(alink);
	if (err)
		goto err_delete;

	return 0;

err_delete:
	/* Drop any previous mapping for this handle so SW and HW
	 * configuration cannot diverge.
	 */
	nfp_abm_u32_knode_delete(alink, knode);
	return -EOPNOTSUPP;
}
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct tc_cls_u32_offload *cls_u32 = type_data;
struct nfp_repr *repr = cb_priv;
struct nfp_abm_link *alink;
alink = repr->app_priv;
if (type != TC_SETUP_CLSU32) {
NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
"only offload of u32 classifier supported");
return -EOPNOTSUPP;
}
if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
return -EOPNOTSUPP;
if (cls_u32->common.protocol != htons(ETH_P_IP) &&
cls_u32->common.protocol != htons(ETH_P_IPV6)) {
NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
"only IP and IPv6 supported as filter protocol");
return -EOPNOTSUPP;
}
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
cls_u32->common.protocol,
cls_u32->common.extack);
case TC_CLSU32_DELETE_KNODE:
nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
return 0;
default:
return -EOPNOTSUPP;
}
}
/* Register/unregister the u32 offload callback on an egress clsact
 * TC block; other binder types and commands are not supported.
 */
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
			    struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		return -EOPNOTSUPP;

	if (f->command == TC_BLOCK_BIND)
		return tcf_block_cb_register(f->block,
					     nfp_abm_setup_tc_block_cb,
					     repr, repr, f->extack);

	if (f->command == TC_BLOCK_UNBIND) {
		tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
					repr);
		return 0;
	}

	return -EOPNOTSUPP;
}

View File

@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
@ -11,38 +13,56 @@
#include "../nfp_net.h"
#include "main.h"
#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u"
#define NFP_NUM_PRIOS_SYM_NAME "_abi_pci_dscp_num_prio_%u"
#define NFP_NUM_BANDS_SYM_NAME "_abi_pci_dscp_num_band_%u"
#define NFP_ACT_MASK_SYM_NAME "_abi_nfd_out_q_actions_%u"
#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE 16
#define NFP_QLVL_BLOG_BYTES 0
#define NFP_QLVL_BLOG_PKTS 4
#define NFP_QLVL_THRS 8
#define NFP_QLVL_ACT 12
#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats"
#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE 32
#define NFP_QMSTAT_NON_STO 0
#define NFP_QMSTAT_STO 8
#define NFP_QMSTAT_DROP 16
#define NFP_QMSTAT_ECN 24
#define NFP_Q_STAT_SYM_NAME "_abi_nfd_rxq_stats%u%s"
#define NFP_Q_STAT_STRIDE 16
#define NFP_Q_STAT_PKTS 0
#define NFP_Q_STAT_BYTES 8
#define NFP_NET_ABM_MBOX_CMD NFP_NET_CFG_MBOX_SIMPLE_CMD
#define NFP_NET_ABM_MBOX_RET NFP_NET_CFG_MBOX_SIMPLE_RET
#define NFP_NET_ABM_MBOX_DATALEN NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_ABM_MBOX_RESERVED (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
#define NFP_NET_ABM_MBOX_DATA (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
unsigned int stride, unsigned int offset, unsigned int i,
bool is_u64, u64 *res)
unsigned int stride, unsigned int offset, unsigned int band,
unsigned int queue, bool is_u64, u64 *res)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
u64 val, sym_offset;
unsigned int qid;
u32 val32;
int err;
sym_offset = (alink->queue_base + i) * stride + offset;
qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
sym_offset = qid * stride + offset;
if (is_u64)
err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
else
err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
if (err) {
nfp_err(cpp,
"RED offload reading stat failed on vNIC %d queue %d\n",
alink->id, i);
nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
alink->id, band, queue, alink->queue_base);
return err;
}
@ -73,81 +93,156 @@ int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
return 0;
}
int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int queue,
u32 val)
int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, u32 val)
{
unsigned int threshold;
threshold = alink->queue_base + queue;
threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
}
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
enum nfp_abm_q_action act)
{
u64 val;
struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
NFP_QMSTAT_NON_STO, i, true, &val))
if (abm->actions[id] == act)
return 0;
return val;
sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
if (err) {
nfp_err(cpp,
"RED offload setting action failed on subqueue %d\n",
id);
return err;
}
abm->actions[id] = act;
return 0;
}
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, enum nfp_abm_q_action act)
{
u64 val;
unsigned int qid;
if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
NFP_QMSTAT_STO, i, true, &val))
return 0;
return val;
qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
}
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_stats *stats)
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
{
unsigned int band;
u64 val, sum = 0;
for (band = 0; band < alink->abm->num_bands; band++) {
if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
band, queue, true, &val))
return 0;
sum += val;
}
return sum;
}
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
unsigned int band;
u64 val, sum = 0;
for (band = 0; band < alink->abm->num_bands; band++) {
if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
band, queue, true, &val))
return 0;
sum += val;
}
return sum;
}
static int
nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, unsigned int off, u64 *val)
{
if (!nfp_abm_has_prio(alink->abm)) {
if (!band) {
unsigned int id = alink->queue_base + queue;
*val = nn_readq(alink->vnic,
NFP_NET_CFG_RXR_STATS(id) + off);
} else {
*val = 0;
}
return 0;
} else {
return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
NFP_Q_STAT_STRIDE, off, band, queue,
true, val);
}
}
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, struct nfp_alink_stats *stats)
{
int err;
stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
&stats->tx_pkts);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
i, false, &stats->backlog_bytes);
err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
&stats->tx_bytes);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
NFP_QLVL_BLOG_BYTES, band, queue, false,
&stats->backlog_bytes);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
i, false, &stats->backlog_pkts);
band, queue, false, &stats->backlog_pkts);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
i, true, &stats->drops);
band, queue, true, &stats->drops);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
i, true, &stats->overlimits);
band, queue, true, &stats->overlimits);
}
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats)
{
int err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
i, true, &xstats->pdrop);
band, queue, true, &xstats->pdrop);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
i, true, &xstats->ecn_marked);
band, queue, true, &xstats->ecn_marked);
}
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
@ -162,10 +257,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
NULL, 0, NULL, 0);
}
void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
struct nfp_net *nn = alink->vnic;
unsigned int i;
int err;
/* Write data_len and wipe reserved */
nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
alink->abm->prio_map_len);
for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
packed[i / sizeof(u32)]);
err = nfp_net_reconfig_mbox(nn,
NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
if (err)
nfp_err(alink->abm->app->cpp,
"setting DSCP -> VQ map failed with error %d\n", err);
return err;
}
static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
{
struct nfp_abm *abm = alink->abm;
struct nfp_net *nn = alink->vnic;
unsigned int min_mbox_sz;
if (!nfp_abm_has_prio(alink->abm))
return 0;
min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
if (nn->tlv_caps.mbox_len < min_mbox_sz) {
nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
nn->tlv_caps.mbox_len, min_mbox_sz);
return -EINVAL;
}
return 0;
}
int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
alink->queue_base /= alink->vnic->stride_rx;
return nfp_abm_ctrl_prio_check_params(alink);
}
static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
{
unsigned int size;
size = roundup_pow_of_two(order_base_2(abm->num_bands));
size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
size = round_up(size, sizeof(u32));
return size;
}
static const struct nfp_rtsym *
@ -189,33 +338,77 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
}
static const struct nfp_rtsym *
nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
unsigned int size)
nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
size_t size)
{
return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
char pf_symbol[64];
size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");
return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
struct nfp_pf *pf = abm->app->pf;
const struct nfp_rtsym *sym;
unsigned int pf_id;
char pf_symbol[64];
int res;
pf_id = nfp_cppcore_pcie_unit(pf->cpp);
abm->pf_id = pf_id;
abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);
snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
/* Read count of prios and prio bands */
res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
if (res < 0)
return res;
abm->num_bands = res;
res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
if (res < 0)
return res;
abm->num_prios = res;
/* Read available actions */
res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
BIT(NFP_ABM_ACT_MARK_DROP));
if (res < 0)
return res;
abm->action_mask = res;
abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));
/* Check values are sane, U16_MAX is arbitrarily chosen as max */
if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
(abm->num_bands == 1) != (abm->num_prios == 1)) {
nfp_err(pf->cpp,
"invalid priomap description num bands: %u and num prios: %u\n",
abm->num_bands, abm->num_prios);
return -EINVAL;
}
/* Find level and stat symbols */
sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
NFP_QLVL_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->q_lvls = sym;
snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
NFP_QMSTAT_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->qm_stats = sym;
if (nfp_abm_has_prio(abm)) {
sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
NFP_Q_STAT_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->q_stats = sym;
}
return 0;
}

View File

@ -44,6 +44,10 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_GRED:
return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data);
case TC_SETUP_BLOCK:
return nfp_abm_setup_cls_block(netdev, repr, type_data);
default:
return -EOPNOTSUPP;
}
@ -313,21 +317,32 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->id = id;
alink->total_queues = alink->vnic->max_rx_rings;
INIT_LIST_HEAD(&alink->dscp_map);
err = nfp_abm_ctrl_read_params(alink);
if (err)
goto err_free_alink;
alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
if (!alink->prio_map)
goto err_free_alink;
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
goto err_free_alink;
goto err_free_priomap;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
nfp_abm_ctrl_read_params(alink);
INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
err_free_priomap:
kfree(alink->prio_map);
err_free_alink:
kfree(alink);
return err;
@ -339,9 +354,19 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
nfp_abm_kill_reprs(alink->abm, alink);
WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n");
kfree(alink->prio_map);
kfree(alink);
}
static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
struct nfp_abm_link *alink = nn->app_priv;
if (nfp_abm_has_prio(alink->abm))
return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
return 0;
}
static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
@ -422,7 +447,7 @@ static int nfp_abm_init(struct nfp_app *app)
goto err_free_abm;
err = -ENOMEM;
abm->num_thresholds = NFP_NET_MAX_RX_RINGS;
abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS);
abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
if (!abm->threshold_undef)
goto err_free_abm;
@ -431,18 +456,25 @@ static int nfp_abm_init(struct nfp_app *app)
sizeof(*abm->thresholds), GFP_KERNEL);
if (!abm->thresholds)
goto err_free_thresh_umap;
for (i = 0; i < NFP_NET_MAX_RX_RINGS; i++)
for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++)
__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions),
GFP_KERNEL);
if (!abm->actions)
goto err_free_thresh;
for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++)
__nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP);
/* We start in legacy mode, make sure advanced queuing is disabled */
err = nfp_abm_ctrl_qm_disable(abm);
if (err)
goto err_free_thresh;
goto err_free_act;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
goto err_free_thresh;
goto err_free_act;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@ -454,6 +486,8 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_free_act:
kvfree(abm->actions);
err_free_thresh:
kvfree(abm->thresholds);
err_free_thresh_umap:
@ -472,6 +506,7 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
bitmap_free(abm->threshold_undef);
kvfree(abm->actions);
kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
@ -486,6 +521,7 @@ const struct nfp_app_type app_abm = {
.vnic_alloc = nfp_abm_vnic_alloc,
.vnic_free = nfp_abm_vnic_free,
.vnic_init = nfp_abm_vnic_init,
.port_get_stats = nfp_abm_port_get_stats,
.port_get_stats_count = nfp_abm_port_get_stats_count,

View File

@ -5,9 +5,11 @@
#define __NFP_ABM_H__ 1
#include <linux/bits.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
* 2.5ms / 400Hz seems more than sufficient for stats resolution.
@ -22,31 +24,62 @@ struct nfp_net;
#define NFP_ABM_PORTID_TYPE GENMASK(23, 16)
#define NFP_ABM_PORTID_ID GENMASK(7, 0)
/* The possible actions if thresholds are exceeded */
enum nfp_abm_q_action {
/* mark if ECN capable, otherwise drop */
NFP_ABM_ACT_MARK_DROP = 0,
/* mark if ECN capable, otherwise goto QM */
NFP_ABM_ACT_MARK_QUEUE = 1,
NFP_ABM_ACT_DROP = 2,
NFP_ABM_ACT_QUEUE = 3,
NFP_ABM_ACT_NOQUEUE = 4,
};
/**
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
*
* @num_prios: number of supported DSCP priorities
* @num_bands: number of supported DSCP priority bands
* @action_mask: bitmask of supported actions
*
* @thresholds: current threshold configuration
* @threshold_undef: bitmap of thresholds which have not been set
* @actions: current FW action configuration
* @num_thresholds: number of @thresholds and bits in @threshold_undef
*
* @prio_map_len: computed length of FW priority map (in bytes)
* @dscp_mask: mask FW will apply on DSCP field
*
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
*
* @q_lvls: queue level control area
* @qm_stats: queue statistics symbol
* @q_stats: basic queue statistics (only in per-band case)
*/
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
unsigned int num_prios;
unsigned int num_bands;
unsigned int action_mask;
u32 *thresholds;
unsigned long *threshold_undef;
u8 *actions;
size_t num_thresholds;
unsigned int prio_map_len;
u8 dscp_mask;
enum devlink_eswitch_mode eswitch_mode;
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
const struct nfp_rtsym *q_stats;
};
/**
@ -81,6 +114,7 @@ enum nfp_qdisc_type {
NFP_QDISC_NONE = 0,
NFP_QDISC_MQ,
NFP_QDISC_RED,
NFP_QDISC_GRED,
};
#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
@ -104,11 +138,14 @@ enum nfp_qdisc_type {
* @mq.prev_stats: previously reported @mq.stats
*
* @red: RED Qdisc specific parameters and state
* @red.threshold: ECN marking threshold
* @red.stats: current stats of the RED Qdisc
* @red.prev_stats: previously reported @red.stats
* @red.xstats: extended stats for RED - current
* @red.prev_xstats: extended stats for RED - previously reported
* @red.num_bands: Number of valid entries in the @red.band table
* @red.band: Per-band array of RED instances
* @red.band.ecn: ECN marking is enabled (rather than drop)
* @red.band.threshold: ECN marking threshold
* @red.band.stats: current stats of the RED Qdisc
* @red.band.prev_stats: previously reported @red.stats
* @red.band.xstats: extended stats for RED - current
* @red.band.prev_xstats: extended stats for RED - previously reported
*/
struct nfp_qdisc {
struct net_device *netdev;
@ -129,13 +166,18 @@ struct nfp_qdisc {
struct nfp_alink_stats stats;
struct nfp_alink_stats prev_stats;
} mq;
/* TC_SETUP_QDISC_RED */
/* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */
struct {
u32 threshold;
struct nfp_alink_stats stats;
struct nfp_alink_stats prev_stats;
struct nfp_alink_xstats xstats;
struct nfp_alink_xstats prev_xstats;
unsigned int num_bands;
struct {
bool ecn;
u32 threshold;
struct nfp_alink_stats stats;
struct nfp_alink_stats prev_stats;
struct nfp_alink_xstats xstats;
struct nfp_alink_xstats prev_xstats;
} band[MAX_DPs];
} red;
};
};
@ -150,6 +192,12 @@ struct nfp_qdisc {
*
* @last_stats_update: ktime of last stats update
*
* @prio_map: current map of priorities
* @has_prio: @prio_map is valid
*
* @def_band: default band to use
* @dscp_map: list of DSCP to band mappings
*
* @root_qdisc: pointer to the current root of the Qdisc hierarchy
* @qdiscs: all qdiscs recorded by major part of the handle
*/
@ -162,10 +210,31 @@ struct nfp_abm_link {
u64 last_stats_update;
u32 *prio_map;
bool has_prio;
u8 def_band;
struct list_head dscp_map;
struct nfp_qdisc *root_qdisc;
struct radix_tree_root qdiscs;
};
static inline bool nfp_abm_has_prio(struct nfp_abm *abm)
{
return abm->num_bands > 1;
}
static inline bool nfp_abm_has_drop(struct nfp_abm *abm)
{
return abm->action_mask & BIT(NFP_ABM_ACT_DROP);
}
static inline bool nfp_abm_has_mark(struct nfp_abm *abm)
{
return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP);
}
void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_root_qopt_offload *opt);
@ -173,18 +242,30 @@ int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt);
int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_mq_qopt_offload *opt);
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt);
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
struct tc_block_offload *opt);
void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int queue,
u32 val);
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, u32 val);
int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
enum nfp_abm_q_action act);
int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, enum nfp_abm_q_action act);
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink,
unsigned int band, unsigned int queue,
struct nfp_alink_stats *stats);
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
void nfp_abm_prio_map_update(struct nfp_abm *abm);
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed);
#endif

View File

@ -15,7 +15,7 @@
static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
{
return qdisc->type == NFP_QDISC_RED;
return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
}
static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
@ -46,20 +46,25 @@ nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
unsigned int queue)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
unsigned int i;
int err;
if (!qdisc->offloaded)
return;
err = nfp_abm_ctrl_read_q_stats(alink, queue, &qdisc->red.stats);
if (err)
nfp_err(cpp, "RED stats (%d) read failed with error %d\n",
queue, err);
for (i = 0; i < qdisc->red.num_bands; i++) {
err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
&qdisc->red.band[i].stats);
if (err)
nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
i, queue, err);
err = nfp_abm_ctrl_read_q_xstats(alink, queue, &qdisc->red.xstats);
if (err)
nfp_err(cpp, "RED xstats (%d) read failed with error %d\n",
queue, err);
err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
&qdisc->red.band[i].xstats);
if (err)
nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
i, queue, err);
}
}
static void
@ -113,6 +118,8 @@ nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
static void
nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
unsigned int i;
/* Don't complain when qdisc is getting unlinked */
if (qdisc->use_cnt)
nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
@ -121,12 +128,14 @@ nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
if (!nfp_abm_qdisc_is_red(qdisc))
return;
qdisc->red.stats.backlog_pkts = 0;
qdisc->red.stats.backlog_bytes = 0;
for (i = 0; i < qdisc->red.num_bands; i++) {
qdisc->red.band[i].stats.backlog_pkts = 0;
qdisc->red.band[i].stats.backlog_bytes = 0;
}
}
static int
__nfp_abm_stats_init(struct nfp_abm_link *alink,
__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
unsigned int queue, struct nfp_alink_stats *prev_stats,
struct nfp_alink_xstats *prev_xstats)
{
@ -139,19 +148,19 @@ __nfp_abm_stats_init(struct nfp_abm_link *alink,
backlog_pkts = prev_stats->backlog_pkts;
backlog_bytes = prev_stats->backlog_bytes;
err = nfp_abm_ctrl_read_q_stats(alink, queue, prev_stats);
err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
if (err) {
nfp_err(alink->abm->app->cpp,
"RED stats init (%d) failed with error %d\n",
queue, err);
"RED stats init (%d, %d) failed with error %d\n",
band, queue, err);
return err;
}
err = nfp_abm_ctrl_read_q_xstats(alink, queue, prev_xstats);
err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
if (err) {
nfp_err(alink->abm->app->cpp,
"RED xstats init (%d) failed with error %d\n",
queue, err);
"RED xstats init (%d, %d) failed with error %d\n",
band, queue, err);
return err;
}
@ -164,19 +173,36 @@ static int
nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
unsigned int queue)
{
return __nfp_abm_stats_init(alink, queue,
&qdisc->red.prev_stats,
&qdisc->red.prev_xstats);
unsigned int i;
int err;
for (i = 0; i < qdisc->red.num_bands; i++) {
err = __nfp_abm_stats_init(alink, i, queue,
&qdisc->red.band[i].prev_stats,
&qdisc->red.band[i].prev_xstats);
if (err)
return err;
}
return 0;
}
static void
nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
unsigned int queue)
{
qdisc->offload_mark = qdisc->type == NFP_QDISC_RED &&
qdisc->params_ok &&
qdisc->use_cnt == 1 &&
!qdisc->children[0];
bool good_red, good_gred;
unsigned int i;
good_red = qdisc->type == NFP_QDISC_RED &&
qdisc->params_ok &&
qdisc->use_cnt == 1 &&
!alink->has_prio &&
!qdisc->children[0];
good_gred = qdisc->type == NFP_QDISC_GRED &&
qdisc->params_ok &&
qdisc->use_cnt == 1;
qdisc->offload_mark = good_red || good_gred;
/* If we are starting offload init prev_stats */
if (qdisc->offload_mark && !qdisc->offloaded)
@ -186,7 +212,15 @@ nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
if (!qdisc->offload_mark)
return;
nfp_abm_ctrl_set_q_lvl(alink, queue, qdisc->red.threshold);
for (i = 0; i < alink->abm->num_bands; i++) {
enum nfp_abm_q_action act;
nfp_abm_ctrl_set_q_lvl(alink, i, queue,
qdisc->red.band[i].threshold);
act = qdisc->red.band[i].ecn ?
NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
nfp_abm_ctrl_set_q_act(alink, i, queue, act);
}
}
static void
@ -217,8 +251,10 @@ void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
size_t i;
/* Mark all thresholds as unconfigured */
__bitmap_set(abm->threshold_undef,
alink->queue_base, alink->total_queues);
for (i = 0; i < abm->num_bands; i++)
__bitmap_set(abm->threshold_undef,
i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
alink->total_queues);
/* Clear offload marks */
radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
@ -312,9 +348,11 @@ nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
if (!qdisc)
return NULL;
qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
if (!qdisc->children)
goto err_free_qdisc;
if (children) {
qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
if (!qdisc->children)
goto err_free_qdisc;
}
qdisc->netdev = netdev;
qdisc->type = type;
@ -440,6 +478,144 @@ nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
stats->pdrop += new->pdrop - old->pdrop;
}
static int
nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
struct tc_gred_qopt_offload_stats *stats)
{
struct nfp_qdisc *qdisc;
unsigned int i;
nfp_abm_stats_update(alink);
qdisc = nfp_abm_qdisc_find(alink, handle);
if (!qdisc)
return -EOPNOTSUPP;
/* If the qdisc offload has stopped we may need to adjust the backlog
* counters back so carry on even if qdisc is not currently offloaded.
*/
for (i = 0; i < qdisc->red.num_bands; i++) {
if (!stats->xstats[i])
continue;
nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
&qdisc->red.band[i].prev_stats,
&stats->bstats[i], &stats->qstats[i]);
qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
&qdisc->red.band[i].prev_xstats,
stats->xstats[i]);
qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
}
return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}
static bool
nfp_abm_gred_check_params(struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
struct nfp_abm *abm = alink->abm;
unsigned int i;
if (opt->set.grio_on || opt->set.wred_on) {
nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
opt->parent, opt->handle);
return false;
}
if (opt->set.dp_def != alink->def_band) {
nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
alink->def_band, opt->parent, opt->handle);
return false;
}
if (opt->set.dp_cnt != abm->num_bands) {
nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
abm->num_bands, opt->parent, opt->handle);
return false;
}
for (i = 0; i < abm->num_bands; i++) {
struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];
if (!band->present)
return false;
if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
opt->parent, opt->handle, i);
return false;
}
if (band->is_ecn && !nfp_abm_has_mark(abm)) {
nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
opt->parent, opt->handle, i);
return false;
}
if (band->is_harddrop) {
nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
opt->parent, opt->handle, i);
return false;
}
if (band->min != band->max) {
nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
opt->parent, opt->handle, i);
return false;
}
if (band->min > S32_MAX) {
nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
band->min, S32_MAX, opt->parent, opt->handle,
i);
return false;
}
}
return true;
}
static int
nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt)
{
struct nfp_qdisc *qdisc;
unsigned int i;
int ret;
ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
opt->handle, 0, &qdisc);
if (ret < 0)
return ret;
qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
if (qdisc->params_ok) {
qdisc->red.num_bands = opt->set.dp_cnt;
for (i = 0; i < qdisc->red.num_bands; i++) {
qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
qdisc->red.band[i].threshold = opt->set.tab[i].min;
}
}
if (qdisc->use_cnt)
nfp_abm_qdisc_offload_update(alink);
return 0;
}
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt)
{
switch (opt->command) {
case TC_GRED_REPLACE:
return nfp_abm_gred_replace(netdev, alink, opt);
case TC_GRED_DESTROY:
nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
return 0;
case TC_GRED_STATS:
return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
default:
return -EOPNOTSUPP;
}
}
static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
@ -451,10 +627,10 @@ nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
if (!qdisc || !qdisc->offloaded)
return -EOPNOTSUPP;
nfp_abm_stats_red_calculate(&qdisc->red.xstats,
&qdisc->red.prev_xstats,
nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
&qdisc->red.band[0].prev_xstats,
opt->xstats);
qdisc->red.prev_xstats = qdisc->red.xstats;
qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
return 0;
}
@ -473,10 +649,10 @@ nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
* counters back so carry on even if qdisc is not currently offloaded.
*/
nfp_abm_stats_calculate(&qdisc->red.stats,
&qdisc->red.prev_stats,
nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
&qdisc->red.band[0].prev_stats,
stats->bstats, stats->qstats);
qdisc->red.prev_stats = qdisc->red.stats;
qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}
@ -486,12 +662,18 @@ nfp_abm_red_check_params(struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
struct nfp_abm *abm = alink->abm;
if (!opt->set.is_ecn) {
if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
opt->parent, opt->handle);
return false;
}
if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
opt->parent, opt->handle);
return false;
}
if (opt->set.is_harddrop) {
nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
opt->parent, opt->handle);
@ -538,8 +720,11 @@ nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
}
qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
if (qdisc->params_ok)
qdisc->red.threshold = opt->set.min;
if (qdisc->params_ok) {
qdisc->red.num_bands = 1;
qdisc->red.band[0].ecn = opt->set.is_ecn;
qdisc->red.band[0].threshold = opt->set.min;
}
if (qdisc->use_cnt == 1)
nfp_abm_qdisc_offload_update(alink);
@ -592,7 +777,7 @@ nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
struct tc_qopt_offload_stats *stats)
{
struct nfp_qdisc *qdisc, *red;
unsigned int i;
unsigned int i, j;
qdisc = nfp_abm_qdisc_find(alink, handle);
if (!qdisc)
@ -614,10 +799,12 @@ nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
continue;
red = qdisc->children[i];
nfp_abm_stats_propagate(&qdisc->mq.stats,
&red->red.stats);
nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
&red->red.prev_stats);
for (j = 0; j < red->red.num_bands; j++) {
nfp_abm_stats_propagate(&qdisc->mq.stats,
&red->red.band[j].stats);
nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
&red->red.band[j].prev_stats);
}
}
nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,

View File

@ -868,6 +868,7 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,

View File

@ -279,7 +279,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;

View File

@ -397,6 +397,8 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
/**
* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox

View File

@ -846,6 +846,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
};
/* These structures hold the attributes of bpf state that are being passed

View File

@ -643,6 +643,7 @@ struct tc_cls_common_offload {
struct tc_cls_u32_knode {
struct tcf_exts *exts;
struct tcf_result *res;
struct tc_u32_sel *sel;
u32 handle;
u32 val;
@ -868,6 +869,50 @@ struct tc_red_qopt_offload {
};
};
enum tc_gred_command {
TC_GRED_REPLACE,
TC_GRED_DESTROY,
TC_GRED_STATS,
};
struct tc_gred_vq_qopt_offload_params {
bool present;
u32 limit;
u32 prio;
u32 min;
u32 max;
bool is_ecn;
bool is_harddrop;
u32 probability;
/* Only need backlog, see struct tc_prio_qopt_offload_params */
u32 *backlog;
};
struct tc_gred_qopt_offload_params {
bool grio_on;
bool wred_on;
unsigned int dp_cnt;
unsigned int dp_def;
struct gnet_stats_queue *qstats;
struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};
struct tc_gred_qopt_offload_stats {
struct gnet_stats_basic_packed bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs];
};
struct tc_gred_qopt_offload {
enum tc_gred_command command;
u32 handle;
u32 parent;
union {
struct tc_gred_qopt_offload_params set;
struct tc_gred_qopt_offload_stats stats;
};
};
enum tc_prio_command {
TC_PRIO_REPLACE,
TC_PRIO_DESTROY,

View File

@ -558,6 +558,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.knode.mask = 0;
#endif
cls_u32.knode.sel = &n->sel;
cls_u32.knode.res = &n->res;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
cls_u32.knode.link_handle = ht->handle;
@ -1206,6 +1207,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.knode.mask = 0;
#endif
cls_u32.knode.sel = &n->sel;
cls_u32.knode.res = &n->res;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
cls_u32.knode.link_handle = ht->handle;

View File

@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>
@ -311,6 +312,92 @@ static void gred_reset(struct Qdisc *sch)
}
}
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
struct gred_sched *table = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct tc_gred_qopt_offload opt = {
.command = command,
.handle = sch->handle,
.parent = sch->parent,
};
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return;
if (command == TC_GRED_REPLACE) {
unsigned int i;
opt.set.grio_on = gred_rio_mode(table);
opt.set.wred_on = gred_wred_mode(table);
opt.set.dp_cnt = table->DPs;
opt.set.dp_def = table->def;
for (i = 0; i < table->DPs; i++) {
struct gred_sched_data *q = table->tab[i];
if (!q)
continue;
opt.set.tab[i].present = true;
opt.set.tab[i].limit = q->limit;
opt.set.tab[i].prio = q->prio;
opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
opt.set.tab[i].is_ecn = gred_use_ecn(q);
opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
opt.set.tab[i].probability = q->parms.max_P;
opt.set.tab[i].backlog = &q->backlog;
}
opt.set.qstats = &sch->qstats;
}
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}
static int gred_offload_dump_stats(struct Qdisc *sch)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt_offload *hw_stats;
unsigned int i;
int ret;
hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
if (!hw_stats)
return -ENOMEM;
hw_stats->command = TC_GRED_STATS;
hw_stats->handle = sch->handle;
hw_stats->parent = sch->parent;
for (i = 0; i < MAX_DPs; i++)
if (table->tab[i])
hw_stats->stats.xstats[i] = &table->tab[i]->stats;
ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
/* Even if driver returns failure adjust the stats - in case offload
* ended but driver still wants to adjust the values.
*/
for (i = 0; i < MAX_DPs; i++) {
if (!table->tab[i])
continue;
table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
_bstats_update(&sch->bstats,
hw_stats->stats.bstats[i].bytes,
hw_stats->stats.bstats[i].packets);
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch->qstats.drops += hw_stats->stats.qstats[i].drops;
sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
}
kfree(hw_stats);
return ret;
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
kfree(q);
@ -385,6 +472,7 @@ static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
}
}
gred_offload(sch, TC_GRED_REPLACE);
return 0;
}
@ -630,6 +718,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_unlock(sch);
kfree(prealloc);
gred_offload(sch, TC_GRED_REPLACE);
return 0;
err_unlock_free:
@ -679,6 +769,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
.flags = table->red_flags,
};
if (gred_offload_dump_stats(sch))
goto nla_put_failure;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
@ -815,6 +908,7 @@ static void gred_destroy(struct Qdisc *sch)
if (table->tab[i])
gred_destroy_vq(table->tab[i]);
}
gred_offload(sch, TC_GRED_DESTROY);
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {