Merge branch 'mlx5e-next'

Amir Vadai says:

====================
ConnectX-4 driver update 2015-07-23

This patchset introduces some performance enhancements to the ConnectX-4 driver.
1. Improve RSS distribution, and make the RSS hash function controllable using
   ethtool (example commands below).
2. Allocate memory that is written by the NIC and read by the host CPU on the
   NUMA node local to the processing CPU.
3. Support tx copybreak.
4. Use the hardware BlueFlame feature to save DMA reads when possible.
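
For reference, with a sufficiently recent ethtool the new knobs can be exercised
from user space roughly as follows (exact option spelling depends on the ethtool
version): the hash function from item 1 with "ethtool -X <dev> hfunc xor" or
"ethtool -X <dev> hfunc toeplitz", and the tx copybreak threshold from item 3
with "ethtool --set-tunable <dev> tx-copybreak <bytes>".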

Another patch by Achiad fixes some cosmetic issues in the driver.

The patchset was applied and tested on top of commit 045a0fa ("ip_tunnel: Call
ip_tunnel_core_init() from inet_init()").
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-07-27 00:29:18 -07:00
commit 6ecfdd28c8
12 changed files with 535 additions and 149 deletions

View File

@ -45,15 +45,34 @@
* register it in a memory region at HCA virtual address 0.
*/
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
size_t size, dma_addr_t *dma_handle,
int node)
{
struct mlx5_priv *priv = &dev->priv;
int original_node;
void *cpu_handle;
mutex_lock(&priv->alloc_mutex);
original_node = dev_to_node(&dev->pdev->dev);
set_dev_node(&dev->pdev->dev, node);
cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
dma_handle, GFP_KERNEL);
set_dev_node(&dev->pdev->dev, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
}
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL);
buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
&t, node);
if (!buf->direct.buf)
return -ENOMEM;
@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
return 0;
}
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
struct mlx5_db_pgdir *pgdir;
@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
return NULL;
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
&pgdir->db_dma, GFP_KERNEL);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
return 0;
}
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
struct mlx5_db_pgdir *pgdir;
int ret = 0;
@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
if (!mlx5_alloc_db_from_pgdir(pgdir, db))
goto out;
pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
pgdir = mlx5_alloc_db_pgdir(dev, node);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@ -145,6 +171,12 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
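
For context, not part of this diff: a minimal sketch of how the new node-aware
allocators above are intended to be used by a caller that knows which CPU will
service the queue. The wrapper function and its error handling are illustrative
assumptions; only mlx5_db_alloc_node(), mlx5_buf_alloc_node() and mlx5_db_free()
come from the driver API touched here.

static int example_alloc_queue_memory(struct mlx5_core_dev *mdev,
				      struct mlx5_buf *buf,
				      struct mlx5_db *db,
				      int size, int cpu)
{
	/* Put the doorbell record and the descriptor buffer on the NUMA
	 * node of the CPU that will be polling them.
	 */
	int node = cpu_to_node(cpu);
	int err;

	err = mlx5_db_alloc_node(mdev, db, node);
	if (err)
		return err;

	err = mlx5_buf_alloc_node(mdev, size, buf, node);
	if (err)
		mlx5_db_free(mdev, db);

	return err;
}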

View File

@ -60,6 +60,7 @@
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
@ -195,6 +196,8 @@ struct mlx5e_params {
u16 rx_hash_log_tbl_sz;
bool lro_en;
u32 lro_wqe_sz;
u8 rss_hfunc;
u16 tx_max_inline;
};
enum {
@ -266,7 +269,9 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
u32 bf_offset;
u16 bf_offset;
u16 prev_cc;
u8 bf_budget;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@ -279,9 +284,10 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
u32 bf_buf_size;
u16 bf_buf_size;
u16 max_inline;
u16 edge;
struct device *pdev;
@ -324,14 +330,18 @@ struct mlx5e_channel {
};
enum mlx5e_traffic_types {
MLX5E_TT_IPV4_TCP = 0,
MLX5E_TT_IPV6_TCP = 1,
MLX5E_TT_IPV4_UDP = 2,
MLX5E_TT_IPV6_UDP = 3,
MLX5E_TT_IPV4 = 4,
MLX5E_TT_IPV6 = 5,
MLX5E_TT_ANY = 6,
MLX5E_NUM_TT = 7,
MLX5E_TT_IPV4_TCP,
MLX5E_TT_IPV6_TCP,
MLX5E_TT_IPV4_UDP,
MLX5E_TT_IPV6_UDP,
MLX5E_TT_IPV4_IPSEC_AH,
MLX5E_TT_IPV6_IPSEC_AH,
MLX5E_TT_IPV4_IPSEC_ESP,
MLX5E_TT_IPV6_IPSEC_ESP,
MLX5E_TT_IPV4,
MLX5E_TT_IPV6,
MLX5E_TT_ANY,
MLX5E_NUM_TT,
};
enum {
@ -491,8 +501,10 @@ int mlx5e_update_priv_params(struct mlx5e_priv *priv,
struct mlx5e_params *new_params);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5e_tx_wqe *wqe)
struct mlx5e_tx_wqe *wqe, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@ -503,9 +515,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
mlx5_write64((__be32 *)&wqe->ctrl,
sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
NULL);
if (bf_sz) {
__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
/* flush the write-combining mapped buffer */
wmb();
} else {
mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
}
sq->bf_offset ^= sq->bf_buf_size;
}
@ -519,3 +537,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
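
Background note, not part of this diff: uar_bf_map points at a write-combining
mapping of the UAR's BlueFlame register. When bf_sz is non-zero, the control
segment (and any inlined data) of the WQE is pushed to the device through that
mapping with __iowrite64_copy(), so the NIC does not have to DMA-read the WQE
from host memory; when bf_sz is zero the driver falls back to the ordinary
doorbell write via mlx5_write64(). The closing bf_offset ^= bf_buf_size toggles
between the two halves of the BlueFlame register (bf_buf_size is set to half of
1 << log_bf_reg_size when the SQ is created), presumably so that back-to-back
posts do not overwrite one another before the device has consumed them.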

View File

@ -662,6 +662,94 @@ out:
return err;
}
static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (hfunc)
*hfunc = priv->params.rss_hfunc;
return 0;
}
static int mlx5e_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err = 0;
if (hfunc == ETH_RSS_HASH_NO_CHANGE)
return 0;
if ((hfunc != ETH_RSS_HASH_XOR) &&
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
mutex_lock(&priv->state_lock);
priv->params.rss_hfunc = hfunc;
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_close_locked(priv->netdev);
err = mlx5e_open_locked(priv->netdev);
}
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
void *data)
{
const struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
*(u32 *)data = priv->params.tx_max_inline;
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mlx5e_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
u32 val;
int err = 0;
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
val = *(u32 *)data;
if (val > mlx5e_get_max_inline_cap(mdev)) {
err = -EINVAL;
break;
}
mutex_lock(&priv->state_lock);
new_params = priv->params;
new_params.tx_max_inline = val;
err = mlx5e_update_priv_params(priv, &new_params);
mutex_unlock(&priv->state_lock);
break;
default:
err = -EINVAL;
break;
}
return err;
}
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@ -676,4 +764,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_coalesce = mlx5e_set_coalesce,
.get_settings = mlx5e_get_settings,
.set_settings = mlx5e_set_settings,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
};
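
Design note, not part of this diff: these hooks expose only the hash function;
the indirection table and hash key arguments of get/set_rxfh are left untouched
here. Changing the hash function while the interface is up closes and re-opens
the device under state_lock, so the receive contexts are rebuilt with the new
function at the cost of a brief traffic interruption.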

View File

@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
{
void *ft = priv->ft.main;
if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
mlx5_del_flow_table_entry(ft,
ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
mlx5_del_flow_table_entry(ft,
ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
mlx5_del_flow_table_entry(ft,
ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
mlx5_del_flow_table_entry(ft,
ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
if (ai->tt_vec & (1 << MLX5E_TT_ANY))
if (ai->tt_vec & BIT(MLX5E_TT_ANY))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}
@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
switch (eth_addr_type) {
case MLX5E_UC:
ret =
(1 << MLX5E_TT_IPV4_TCP) |
(1 << MLX5E_TT_IPV6_TCP) |
(1 << MLX5E_TT_IPV4_UDP) |
(1 << MLX5E_TT_IPV6_UDP) |
(1 << MLX5E_TT_IPV4) |
(1 << MLX5E_TT_IPV6) |
(1 << MLX5E_TT_ANY) |
BIT(MLX5E_TT_IPV4_TCP) |
BIT(MLX5E_TT_IPV6_TCP) |
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV4_IPSEC_AH) |
BIT(MLX5E_TT_IPV6_IPSEC_AH) |
BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
BIT(MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV6) |
BIT(MLX5E_TT_ANY) |
0;
break;
case MLX5E_MC_IPV4:
ret =
(1 << MLX5E_TT_IPV4_UDP) |
(1 << MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV4) |
0;
break;
case MLX5E_MC_IPV6:
ret =
(1 << MLX5E_TT_IPV6_UDP) |
(1 << MLX5E_TT_IPV6) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV6) |
0;
break;
case MLX5E_MC_OTHER:
ret =
(1 << MLX5E_TT_ANY) |
BIT(MLX5E_TT_ANY) |
0;
break;
}
@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
case MLX5E_ALLMULTI:
ret =
(1 << MLX5E_TT_IPV4_UDP) |
(1 << MLX5E_TT_IPV6_UDP) |
(1 << MLX5E_TT_IPV4) |
(1 << MLX5E_TT_IPV6) |
(1 << MLX5E_TT_ANY) |
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV6) |
BIT(MLX5E_TT_ANY) |
0;
break;
default: /* MLX5E_PROMISC */
ret =
(1 << MLX5E_TT_IPV4_TCP) |
(1 << MLX5E_TT_IPV6_TCP) |
(1 << MLX5E_TT_IPV4_UDP) |
(1 << MLX5E_TT_IPV6_UDP) |
(1 << MLX5E_TT_IPV4) |
(1 << MLX5E_TT_IPV6) |
(1 << MLX5E_TT_ANY) |
BIT(MLX5E_TT_IPV4_TCP) |
BIT(MLX5E_TT_IPV6_TCP) |
BIT(MLX5E_TT_IPV4_UDP) |
BIT(MLX5E_TT_IPV6_UDP) |
BIT(MLX5E_TT_IPV4_IPSEC_AH) |
BIT(MLX5E_TT_IPV6_IPSEC_AH) |
BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
BIT(MLX5E_TT_IPV4) |
BIT(MLX5E_TT_IPV6) |
BIT(MLX5E_TT_ANY) |
0;
break;
}
@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
u8 *match_criteria_dmac;
void *ft = priv->ft.main;
u32 *tirn = priv->tirn;
u32 *ft_ix;
u32 tt_vec;
int err;
@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
if (tt_vec & (1 << MLX5E_TT_ANY)) {
ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
if (tt_vec & BIT(MLX5E_TT_ANY)) {
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_ANY]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_ANY]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_ANY);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
if (tt_vec & (1 << MLX5E_TT_IPV4)) {
ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
if (tt_vec & BIT(MLX5E_TT_IPV4)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_IPV4]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_IPV4);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
if (tt_vec & (1 << MLX5E_TT_IPV6)) {
ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
if (tt_vec & BIT(MLX5E_TT_IPV6)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_IPV6]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_IPV6);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_UDP);
if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_TCP);
if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
if (err) {
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_AH);
ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_IPSEC_AH]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
}
ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_IPSEC_AH]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_ESP);
ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
}
ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
ft_ix);
if (err)
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
err_del_ai:
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@ -725,7 +821,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
if (!g)
return -ENOMEM;
g[0].log_sz = 2;
g[0].log_sz = 3;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
outer_headers.ethertype);

View File

@ -41,6 +41,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
u16 max_inline;
};
struct mlx5e_cq_param {
@ -272,6 +273,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
@ -502,6 +505,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
return err;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
&sq->wq_ctrl);
if (err)
@ -509,7 +514,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@ -518,11 +525,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
priv->txq_to_sq_map[txq_ix] = sq;
return 0;
@ -702,7 +710,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
int err;
u32 i;
param->wq.numa = cpu_to_node(c->cpu);
param->wq.buf_numa_node = cpu_to_node(c->cpu);
param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@ -1000,7 +1009,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
}
@ -1014,7 +1023,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@ -1158,6 +1168,24 @@ static void mlx5e_close_tises(struct mlx5e_priv *priv)
mlx5e_close_tis(priv, tc);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
return (hfunc == ETH_RSS_HASH_TOP) ?
MLX5_RX_HASH_FN_TOEPLITZ :
MLX5_RX_HASH_FN_INVERTED_XOR8;
}
static int mlx5e_bits_invert(unsigned long a, int size)
{
int inv = 0;
int i;
for (i = 0; i < size; i++)
inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
return inv;
}
static int mlx5e_open_rqt(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@ -1166,11 +1194,10 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
void *rqtc;
int inlen;
int err;
int sz;
int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
int sz = 1 << log_tbl_sz;
int i;
sz = 1 << priv->params.rx_hash_log_tbl_sz;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
if (!in)
@ -1182,8 +1209,12 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
for (i = 0; i < sz; i++) {
int ix = i % priv->params.num_channels;
int ix = i;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, log_tbl_sz);
ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
}
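
Illustrative aside, not part of this diff: mlx5e_bits_invert() reverses the bit
order of the indirection-table index when the XOR hash function is selected;
the hardware function is named MLX5_RX_HASH_FN_INVERTED_XOR8, which suggests
the reversal compensates for that inverted ordering. A standalone sketch of the
same computation, with a worked example:

/* Reverse the low "size" bits of "a"; for size == 3 the indices
 * 0..7 map to 0, 4, 2, 6, 1, 5, 3, 7.
 */
static unsigned int example_bits_invert(unsigned int a, int size)
{
	unsigned int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= ((a >> (size - i - 1)) & 1) << i;

	return inv;
}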
@ -1221,13 +1252,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
#define ROUGH_MAX_L2_L3_HDR_SZ 256
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_L4_SPORT |\
MLX5_HASH_FIELD_SEL_L4_DPORT)
#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_L4_SPORT |\
MLX5_HASH_FIELD_SEL_L4_DPORT)
#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
if (priv->params.lro_en) {
MLX5_SET(tirc, tirc, lro_enable_mask,
@ -1254,12 +1289,16 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn);
MLX5_SET(tirc, tirc, rx_hash_fn,
MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key),
MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key));
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
void *rss_key = MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
netdev_rss_key_fill(rss_key, len);
}
break;
}
@ -1270,7 +1309,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_TCP:
@ -1279,7 +1318,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_UDP:
@ -1288,7 +1327,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_UDP:
@ -1297,7 +1336,35 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4:
@ -1673,6 +1740,15 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
return bf_buf_size -
sizeof(struct mlx5e_tx_wqe) +
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_comp_vectors)
@ -1691,6 +1767,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.rx_hash_log_tbl_sz =
@ -1700,6 +1777,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
priv->params.lro_wqe_sz =

View File

@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
mlx5e_tx_notify_hw(sq, wqe);
mlx5e_tx_notify_hw(sq, wqe, 0);
}
}
@ -110,9 +110,17 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
struct sk_buff *skb)
struct sk_buff *skb, bool bf)
{
#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
/* Some NIC TX decisions, e.g loopback, are based on the packet
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
if (bf && (skb_headlen(skb) <= sq->max_inline))
return skb_headlen(skb);
return MLX5E_MIN_INLINE;
}
@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
bool bf = false;
u16 headlen;
u16 ds_cnt;
u16 ihs;
@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
else
sq->stats.csum_offload_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
}
if (skb_is_gso(skb)) {
u32 payload_len;
@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_packets++;
sq->stats.tso_bytes += payload_len;
} else {
ihs = mlx5e_get_inline_hdr_size(sq, skb);
bf = sq->bf_budget &&
!skb->xmit_more &&
!skb_shinfo(skb)->nr_frags;
ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
ETH_ZLEN);
}
@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
if (bf && sq->uar_bf_map)
bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
mlx5e_tx_notify_hw(sq, wqe);
mlx5e_tx_notify_hw(sq, wqe, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
sq->stats.packets++;
return NETDEV_TX_OK;
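
Notes on the transmit path above, not part of this diff: BlueFlame is attempted
only while budget remains and the skb has no paged fragments and xmit_more is
not set. In that case mlx5e_get_inline_hdr_size() inlines the whole linear part
of the packet into the WQE when it fits within max_inline (the tx copybreak
value), so the device can often transmit without any gather DMA. bf_sz is
expressed in 8-byte words because that is the unit __iowrite64_copy() copies,
hence num_wqebbs << 3 for WQE basic blocks of 64 bytes each. The budget is
decremented on each BlueFlame post, dropped to zero when a frame is sent
without it, and replenished to MLX5E_SQ_BF_BUDGET only when the queue is
observed empty (sq->cc == sq->pc), which throttles BlueFlame use on a busy SQ.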

View File

@ -455,7 +455,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
int numa_node = dev_to_node(&mdev->pdev->dev);
int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@ -654,6 +654,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
}
#endif
static int map_bf_area(struct mlx5_core_dev *dev)
{
resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
return dev->priv.bf_mapping ? 0 : -ENOMEM;
}
static void unmap_bf_area(struct mlx5_core_dev *dev)
{
if (dev->priv.bf_mapping)
io_mapping_free(dev->priv.bf_mapping);
}
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@ -668,6 +684,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
mutex_init(&priv->alloc_mutex);
priv->numa_node = dev_to_node(&dev->pdev->dev);
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
return -ENOMEM;
@ -804,10 +824,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
if (map_bf_area(dev))
dev_err(&pdev->dev, "Failed to map blue flame area\n");
err = mlx5_irq_set_affinity_hints(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
goto err_free_comp_eqs;
goto err_unmap_bf_area;
}
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@ -819,7 +842,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
err_free_comp_eqs:
err_unmap_bf_area:
unmap_bf_area(dev);
free_comp_eqs(dev);
err_stop_eqs:
@ -877,6 +902,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
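
Note, not part of this diff: failure to map the BlueFlame area is deliberately
non-fatal; the driver only logs it and keeps going. The consumers check for a
valid mapping before using it (priv.bf_mapping when allocating a UAR, and
uar_bf_map in the Ethernet TX path), so the driver simply falls back to
ordinary doorbell writes when BlueFlame is unavailable.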

View File

@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
goto err_free_uar;
}
if (mdev->priv.bf_mapping)
uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
uar->index << PAGE_SHIFT);
return 0;
err_free_uar:
@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
mlx5_cmd_free_uar(mdev, uar->index);
}

View File

@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
err = mlx5_db_alloc(mdev, &wq_ctrl->db);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
wq->sz_m1 = (1 << wq->log_sz) - 1;
err = mlx5_db_alloc(mdev, &wq_ctrl->db);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
err = mlx5_db_alloc(mdev, &wq_ctrl->db);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;

View File

@ -37,7 +37,8 @@
struct mlx5_wq_param {
int linear;
int numa;
int buf_numa_node;
int db_numa_node;
};
struct mlx5_wq_ctrl {

View File

@ -380,7 +380,7 @@ struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
void __iomem *wc_map;
void __iomem *bf_map;
void __iomem *map;
};
@ -435,6 +435,8 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
struct io_mapping *bf_mapping;
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@ -463,6 +465,10 @@ struct mlx5_priv {
/* end: mr staff */
/* start: alloc staff */
/* protect buffer alocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc staff */
@ -672,6 +678,8 @@ void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@ -773,6 +781,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);

View File

@ -1936,9 +1936,9 @@ enum {
};
enum {
MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
MLX5_RX_HASH_FN_NONE = 0x0,
MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
};
enum {