Merge tag 'mlx5-updates-2018-02-21' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-02-21

This series includes shared code updates for the mlx5 core driver,
used by both the netdev and rdma subsystems.

By Saeed,
The first six patches of the series address a performance issue and
should provide a performance boost for multi-core, IRQ-hungry
workloads.  The issue is fixed in the first patch; all other patches
refactor the code in light of this fix.

The problem being fixed is a spinlock, shared across all HCA IRQs,
that protects the CQ database.  To solve this we simply move the CQ
database and its spinlock to be per EQ (per IRQ), and thus per core;
a data-structure sketch of this move follows the commit header below.

By Yonatan,
Fragmented completion queues (CQs) for RDMA: a core driver
implementation that creates fragmented CQ buffers rather than one
large contiguous memory buffer.  The implementation scheme already
exists and is used by the netdev CQs; the patch shares that code with
the rdma CQ creation flow and makes use of the new API in the mlx5_ib
driver.  The fragment indexing arithmetic is illustrated after the
commit header below.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-02-22 14:38:38 -05:00 as merge commit f4af1db48b.
12 changed files with 279 additions and 218 deletions.
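
To make the locking change concrete, here is a minimal sketch (plain
kernel-style C, not the driver code itself; the real definitions are in
the include/linux/mlx5/driver.h hunk below) of the data-structure move.
Before the series, one CQ table and one spinlock per device served every
HCA IRQ; after it, each EQ carries its own table, so completions arriving
on different IRQs no longer serialize on a single lock:

#include <linux/spinlock.h>
#include <linux/radix-tree.h>

/* Sketch only: mirrors struct mlx5_cq_table from the diff below. */
struct cq_table_sketch {
        spinlock_t lock;                /* protects the radix tree */
        struct radix_tree_root tree;    /* maps cqn -> mlx5_core_cq */
};

/*
 * Before: mlx5_priv (one per device) embedded the single cq_table, so
 * every CQ lookup from every EQ interrupt handler contended on one lock.
 * After: each EQ embeds its own table, and since completion EQ IRQs are
 * spread one per core, the lock becomes effectively per core.
 */
struct eq_sketch {
        struct cq_table_sketch cq_table;   /* per EQ, i.e. per IRQ/core */
        /* ... doorbell, consumer index, EQE buffer, etc. ... */
};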
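
Likewise, the fragmented-CQ lookup is plain shift/mask arithmetic.  The
sketch below (standalone userspace C, mirroring mlx5_frag_buf_get_wqe()
from the include/linux/mlx5/driver.h hunk; the concrete numbers are
assumptions) uses 64-byte CQEs (log_stride = 6) and 4KB fragments
(PAGE_SHIFT = 12), so each fragment holds 64 CQEs:

#include <stdio.h>

int main(void)
{
        /* Assumed parameters, as mlx5_core_init_cq_frag_buf() would
         * compute them for 64B CQEs on a 4KB-page system.
         */
        unsigned int log_stride = 6;                     /* 64B per CQE */
        unsigned int log_frag_strides = 12 - log_stride; /* 64 CQEs/frag */
        unsigned int frag_sz_m1 = (1u << log_frag_strides) - 1;
        unsigned int ix = 200;                           /* CQE index */

        /* Same split as mlx5_frag_buf_get_wqe(): fragment number plus
         * byte offset within that fragment.
         */
        unsigned int frag = ix >> log_frag_strides;         /* -> 3 */
        unsigned int off = (ix & frag_sz_m1) << log_stride; /* -> 512 */

        printf("CQE %u -> fragment %u, byte offset %u\n", ix, frag, off);
        return 0;
}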

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -64,14 +64,9 @@ static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
 	}
 }
 
-static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
-{
-	return mlx5_buf_offset(&buf->buf, n * size);
-}
-
 static void *get_cqe(struct mlx5_ib_cq *cq, int n)
 {
-	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
+	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
 }
 
 static u8 sw_ownership_bit(int n, int nent)
@@ -403,7 +398,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-	mlx5_buf_free(dev->mdev, &buf->buf);
+	mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -724,12 +719,25 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	return ret;
 }
 
-static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
-			int nent, int cqe_size)
+static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
+			     struct mlx5_ib_cq_buf *buf,
+			     int nent,
+			     int cqe_size)
 {
+	struct mlx5_frag_buf_ctrl *c = &buf->fbc;
+	struct mlx5_frag_buf *frag_buf = &c->frag_buf;
+	u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
 	int err;
 
-	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
+	MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
+	MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
+
+	mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
+
+	err = mlx5_frag_buf_alloc_node(dev->mdev,
+				       nent * cqe_size,
+				       frag_buf,
+				       dev->mdev->priv.numa_node);
 	if (err)
 		return err;
 
@@ -862,14 +870,15 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
 	ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
+			     struct mlx5_ib_cq_buf *buf)
 {
 	int i;
 	void *cqe;
 	struct mlx5_cqe64 *cqe64;
 
 	for (i = 0; i < buf->nent; i++) {
-		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+		cqe = get_cqe(cq, i);
 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
 		cqe64->op_own = MLX5_CQE_INVALID << 4;
 	}
@@ -891,14 +900,15 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	cq->mcq.arm_db = cq->db.db + 1;
 	cq->mcq.cqe_sz = cqe_size;
 
-	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
+	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
 	if (err)
 		goto err_db;
 
-	init_cq_buf(cq, &cq->buf);
+	init_cq_frag_buf(cq, &cq->buf);
 
 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
+		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+		 cq->buf.fbc.frag_buf.npages;
 	*cqb = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*cqb) {
 		err = -ENOMEM;
@@ -906,11 +916,12 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	}
 
 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
-	mlx5_fill_page_array(&cq->buf.buf, pas);
+	mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
 
 	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
 	MLX5_SET(cqc, cqc, log_page_size,
-		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+		 cq->buf.fbc.frag_buf.page_shift -
+		 MLX5_ADAPTER_PAGE_SHIFT);
 
 	*index = dev->mdev->priv.uar->index;
 
@@ -1207,11 +1218,11 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (!cq->resize_buf)
 		return -ENOMEM;
 
-	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
 	if (err)
 		goto ex;
 
-	init_cq_buf(cq, cq->resize_buf);
+	init_cq_frag_buf(cq, cq->resize_buf);
 
 	return 0;
 
@@ -1256,9 +1267,8 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
 	}
 
 	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
-		dcqe = get_cqe_from_buf(cq->resize_buf,
-					(i + 1) & (cq->resize_buf->nent),
-					dsize);
+		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
+					     (i + 1) & cq->resize_buf->nent);
 		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
 		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
 		memcpy(dcqe, scqe, dsize);
@@ -1324,8 +1334,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		cqe_size = 64;
 		err = resize_kernel(dev, cq, entries, cqe_size);
 		if (!err) {
-			npas = cq->resize_buf->buf.npages;
-			page_shift = cq->resize_buf->buf.page_shift;
+			struct mlx5_frag_buf_ctrl *c;
+
+			c = &cq->resize_buf->fbc;
+			npas = c->frag_buf.npages;
+			page_shift = c->frag_buf.page_shift;
 		}
 	}
 
@@ -1346,7 +1359,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
 				     pas, 0);
 	else
-		mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+		mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
+					  pas);
 
 	MLX5_SET(modify_cq_in, in,
 		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -371,7 +371,7 @@ struct mlx5_ib_qp {
 		struct mlx5_ib_rss_qp rss_qp;
 		struct mlx5_ib_dct dct;
 	};
-	struct mlx5_buf		buf;
+	struct mlx5_frag_buf	buf;
 
 	struct mlx5_db		db;
 	struct mlx5_ib_wq	rq;
@@ -413,7 +413,7 @@ struct mlx5_ib_qp {
 };
 
 struct mlx5_ib_cq_buf {
-	struct mlx5_buf		buf;
+	struct mlx5_frag_buf_ctrl fbc;
 	struct ib_umem		*umem;
 	int			cqe_size;
 	int			nent;
@@ -495,7 +495,7 @@ struct mlx5_ib_wc {
 struct mlx5_ib_srq {
 	struct ib_srq		ibsrq;
 	struct mlx5_core_srq	msrq;
-	struct mlx5_buf		buf;
+	struct mlx5_frag_buf	buf;
 	struct mlx5_db		db;
 	u64		       *wrid;
 	/* protect SRQ hanlding

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,19 +71,24 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
 }
 
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			struct mlx5_buf *buf, int node)
+			struct mlx5_frag_buf *buf, int node)
 {
 	dma_addr_t t;
 
 	buf->size = size;
 	buf->npages = 1;
 	buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-	buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
-							&t, node);
-	if (!buf->direct.buf)
+
+	buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
+	if (!buf->frags)
 		return -ENOMEM;
 
-	buf->direct.map = t;
+	buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
+							&t, node);
+	if (!buf->frags->buf)
+		goto err_out;
+
+	buf->frags->map = t;
 
 	while (t & ((1 << buf->page_shift) - 1)) {
 		--buf->page_shift;
@@ -91,18 +96,24 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 	}
 
 	return 0;
+
+err_out:
+	kfree(buf->frags);
+	return -ENOMEM;
 }
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+		   int size, struct mlx5_frag_buf *buf)
 {
 	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
 }
-EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
+EXPORT_SYMBOL(mlx5_buf_alloc);
 
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 {
-	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
-			  buf->direct.map);
+	dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+			  buf->frags->map);
+
+	kfree(buf->frags);
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
@@ -147,6 +158,7 @@ err_free_buf:
 err_out:
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
 
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 {
@@ -162,6 +174,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 	}
 	kfree(buf->frags);
 }
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);
 
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 						 int node)
@@ -275,13 +288,13 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 }
 EXPORT_SYMBOL_GPL(mlx5_db_free);
 
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
 {
 	u64 addr;
 	int i;
 
 	for (i = 0; i < buf->npages; i++) {
-		addr = buf->direct.map + (i << buf->page_shift);
+		addr = buf->frags->map + (i << buf->page_shift);
 		pas[i] = cpu_to_be64(addr);
 	}
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,8 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
 			       tasklet_ctx.list) {
 		list_del_init(&mcq->tasklet_ctx.list);
 		mcq->tasklet_ctx.comp(mcq);
-		if (refcount_dec_and_test(&mcq->refcount))
-			complete(&mcq->free);
+		mlx5_cq_put(mcq);
 		if (time_after(jiffies, end))
 			break;
 	}
@@ -80,69 +79,19 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
 	 * still arrive.
 	 */
 	if (list_empty_careful(&cq->tasklet_ctx.list)) {
-		refcount_inc(&cq->refcount);
+		mlx5_cq_hold(cq);
 		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
 	}
 	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
 }
 
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
-{
-	struct mlx5_core_cq *cq;
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-
-	spin_lock(&table->lock);
-	cq = radix_tree_lookup(&table->tree, cqn);
-	if (likely(cq))
-		refcount_inc(&cq->refcount);
-	spin_unlock(&table->lock);
-
-	if (!cq) {
-		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	++cq->arm_sn;
-
-	cq->comp(cq);
-
-	if (refcount_dec_and_test(&cq->refcount))
-		complete(&cq->free);
-}
-
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
-{
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	struct mlx5_core_cq *cq;
-
-	spin_lock(&table->lock);
-	cq = radix_tree_lookup(&table->tree, cqn);
-	if (cq)
-		refcount_inc(&cq->refcount);
-	spin_unlock(&table->lock);
-
-	if (!cq) {
-		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	cq->event(cq, event_type);
-
-	if (refcount_dec_and_test(&cq->refcount))
-		complete(&cq->free);
-}
-
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			u32 *in, int inlen)
 {
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
-	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
+	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+			   c_eqn);
 	struct mlx5_eq *eq;
 	int err;
 
@@ -159,7 +108,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
 	cq->cons_index = 0;
 	cq->arm_sn     = 0;
-	refcount_set(&cq->refcount, 1);
+	cq->eq         = eq;
+	refcount_set(&cq->refcount, 0);
+	mlx5_cq_hold(cq);
 	init_completion(&cq->free);
 	if (!cq->comp)
 		cq->comp = mlx5_add_cq_to_tasklet;
@@ -167,12 +118,16 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	cq->tasklet_ctx.priv = &eq->tasklet_ctx;
 	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
 
-	spin_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree, cq->cqn, cq);
-	spin_unlock_irq(&table->lock);
+	/* Add to comp EQ CQ tree to recv comp events */
+	err = mlx5_eq_add_cq(eq, cq);
 	if (err)
 		goto err_cmd;
 
+	/* Add to async EQ CQ tree to recv async events */
+	err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
+	if (err)
+		goto err_cq_add;
+
 	cq->pid = current->pid;
 	err = mlx5_debug_cq_add(dev, cq);
 	if (err)
@@ -183,6 +138,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 
 	return 0;
 
+err_cq_add:
+	mlx5_eq_del_cq(eq, cq);
 err_cmd:
 	memset(din, 0, sizeof(din));
 	memset(dout, 0, sizeof(dout));
@@ -195,23 +152,17 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
 
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 {
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
 	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
 	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
-	struct mlx5_core_cq *tmp;
 	int err;
 
-	spin_lock_irq(&table->lock);
-	tmp = radix_tree_delete(&table->tree, cq->cqn);
-	spin_unlock_irq(&table->lock);
-	if (!tmp) {
-		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
-		return -EINVAL;
-	}
-	if (tmp != cq) {
-		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
-		return -EINVAL;
-	}
+	err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
+	if (err)
+		return err;
+
+	err = mlx5_eq_del_cq(cq->eq, cq);
+	if (err)
+		return err;
 
 	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
 	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
@@ -222,8 +173,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 	synchronize_irq(cq->irqn);
 
 	mlx5_debug_cq_remove(dev, cq);
-	if (refcount_dec_and_test(&cq->refcount))
-		complete(&cq->free);
+	mlx5_cq_put(cq);
 	wait_for_completion(&cq->free);
 
 	return 0;
@@ -270,21 +220,3 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
 
 	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
-
-int mlx5_init_cq_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	int err;
-
-	memset(table, 0, sizeof(*table));
-	spin_lock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-	err = mlx5_cq_debugfs_init(dev);
-	return err;
-}
-
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
-{
-	mlx5_cq_debugfs_cleanup(dev);
-}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,7 +52,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
 				       void *data)
 {
-	u32 ci = cqcc & cq->wq.sz_m1;
+	u32 ci = cqcc & cq->wq.fbc.sz_m1;
 
 	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
 }
@@ -74,9 +74,10 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
 
 static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
 {
-	u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
-	u32 wq_sz = 1 << cq->wq.log_sz;
-	u32 ci = cqcc & cq->wq.sz_m1;
+	struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc;
+	u8 op_own = (cqcc >> fbc->log_sz) & 1;
+	u32 wq_sz = 1 << fbc->log_sz;
+	u32 ci = cqcc & fbc->sz_m1;
 	u32 ci_top = min_t(u32, wq_sz, ci + n);
 
 	for (; ci < ci_top; ci++, n--) {
@@ -101,7 +102,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 	cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
 	cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
 	cq->title.op_own &= 0xf0;
-	cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+	cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
 	cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
 
 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev,
 	}
 }
 
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
+{
+	struct mlx5_cq_table *table = &eq->cq_table;
+	struct mlx5_core_cq *cq = NULL;
+
+	spin_lock(&table->lock);
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (likely(cq))
+		mlx5_cq_hold(cq);
+	spin_unlock(&table->lock);
+
+	return cq;
+}
+
+static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
+{
+	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+	if (unlikely(!cq)) {
+		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	++cq->arm_sn;
+
+	cq->comp(cq);
+
+	mlx5_cq_put(cq);
+}
+
+static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+{
+	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+	if (unlikely(!cq)) {
+		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	cq->event(cq, event_type);
+
+	mlx5_cq_put(cq);
+}
+
 static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 {
 	struct mlx5_eq *eq = eq_ptr;
@@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 		switch (eqe->type) {
 		case MLX5_EVENT_TYPE_COMP:
 			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
-			mlx5_cq_completion(dev, cqn);
+			mlx5_eq_cq_completion(eq, cqn);
 			break;
 		case MLX5_EVENT_TYPE_DCT_DRAINED:
 			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
@@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
 			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
 				       cqn, eqe->data.cq_err.syndrome);
-			mlx5_cq_event(dev, cqn, eqe->type);
+			mlx5_eq_cq_event(eq, cqn, eqe->type);
 			break;
 
 		case MLX5_EVENT_TYPE_PAGE_REQUEST:
@@ -567,6 +612,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name,
 		       enum mlx5_eq_type type)
 {
+	struct mlx5_cq_table *cq_table = &eq->cq_table;
 	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
 	struct mlx5_priv *priv = &dev->priv;
 	irq_handler_t handler;
@@ -576,6 +622,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 	u32 *in;
 	int err;
 
+	/* Init CQ table */
+	memset(cq_table, 0, sizeof(*cq_table));
+	spin_lock_init(&cq_table->lock);
+	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
 	eq->type = type;
 	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
 	eq->cons_index = 0;
@@ -669,7 +720,6 @@ err_buf:
 	mlx5_buf_free(dev, &eq->buf);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
 
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
@@ -696,7 +746,40 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+{
+	struct mlx5_cq_table *table = &eq->cq_table;
+	int err;
+
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, cq->cqn, cq);
+	spin_unlock_irq(&table->lock);
+
+	return err;
+}
+
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+{
+	struct mlx5_cq_table *table = &eq->cq_table;
+	struct mlx5_core_cq *tmp;
+
+	spin_lock_irq(&table->lock);
+	tmp = radix_tree_delete(&table->tree, cq->cqn);
+	spin_unlock_irq(&table->lock);
+
+	if (!tmp) {
+		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
+		return -ENOENT;
+	}
+
+	if (tmp != cq) {
+		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 int mlx5_eq_init(struct mlx5_core_dev *dev)
 {
@@ -840,4 +923,3 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -942,9 +942,9 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 		goto out;
 	}
 
-	err = mlx5_init_cq_table(dev);
+	err = mlx5_cq_debugfs_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "failed to initialize cq table\n");
+		dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
 		goto err_eq_cleanup;
 	}
 
@@ -1002,7 +1002,7 @@ err_tables_cleanup:
 	mlx5_cleanup_mkey_table(dev);
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
-	mlx5_cleanup_cq_table(dev);
+	mlx5_cq_debugfs_cleanup(dev);
 
 err_eq_cleanup:
 	mlx5_eq_cleanup(dev);
@@ -1023,7 +1023,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_cleanup_mkey_table(dev);
 	mlx5_cleanup_srq_table(dev);
 	mlx5_cleanup_qp_table(dev);
-	mlx5_cleanup_cq_table(dev);
+	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_eq_cleanup(dev);
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,6 +38,7 @@
 #include <linux/sched.h>
 #include <linux/if_link.h>
 #include <linux/firmware.h>
+#include <linux/mlx5/cq.h>
 
 #define DRIVER_NAME "mlx5_core"
 #define DRIVER_VERSION "5.0-0"
@@ -115,9 +116,29 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
 					u32 element_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
 u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+
+int mlx5_eq_init(struct mlx5_core_dev *dev);
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+		       int nent, u64 mask, const char *name,
+		       enum mlx5_eq_type type);
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+		       u32 *out, int outlen);
+int mlx5_start_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
+void mlx5_cq_tasklet_cb(unsigned long data);
+
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
+
 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
 			u8 access_reg_group);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -41,7 +41,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 {
-	return wq->sz_m1 + 1;
+	return wq->fbc.sz_m1 + 1;
 }
 
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
@@ -62,7 +62,7 @@ static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
 
 static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
 {
-	return mlx5_cqwq_get_size(wq) << wq->log_stride;
+	return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
 }
 
 static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
@@ -92,7 +92,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->buf = wq_ctrl->buf.frags->buf;
 	wq->db  = wq_ctrl->db.db;
 
 	wq_ctrl->mdev = mdev;
@@ -130,7 +130,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	wq->rq.buf = wq_ctrl->buf.direct.buf;
+	wq->rq.buf = wq_ctrl->buf.frags->buf;
 	wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
 	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -151,11 +151,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 {
 	int err;
 
-	wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
-	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
-	wq->sz_m1 = (1 << wq->log_sz) - 1;
-	wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
-	wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
+	mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -172,7 +168,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	wq->frag_buf = wq_ctrl->frag_buf;
+	wq->fbc.frag_buf = wq_ctrl->frag_buf;
 	wq->db  = wq_ctrl->db.db;
 
 	wq_ctrl->mdev = mdev;
@@ -209,7 +205,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->buf = wq_ctrl->buf.frags->buf;
 	wq->db  = wq_ctrl->db.db;
 
 	for (i = 0; i < wq->sz_m1; i++) {

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -45,7 +45,7 @@ struct mlx5_wq_param {
 
 struct mlx5_wq_ctrl {
 	struct mlx5_core_dev	*mdev;
-	struct mlx5_buf		buf;
+	struct mlx5_frag_buf	buf;
 	struct mlx5_db		db;
 };
 
@@ -68,14 +68,9 @@ struct mlx5_wq_qp {
 };
 
 struct mlx5_cqwq {
-	struct mlx5_frag_buf	frag_buf;
-	__be32			*db;
-	u32			sz_m1;
-	u32			frag_sz_m1;
-	u32			cc; /* consumer counter */
-	u8			log_sz;
-	u8			log_stride;
-	u8			log_frag_strides;
+	struct mlx5_frag_buf_ctrl fbc;
+	__be32			  *db;
+	u32			   cc; /* consumer counter */
 };
 
 struct mlx5_wq_ll {
@@ -131,20 +126,17 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 
 static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
 {
-	return wq->cc & wq->sz_m1;
+	return wq->cc & wq->fbc.sz_m1;
 }
 
 static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 {
-	unsigned int frag = (ix >> wq->log_frag_strides);
-
-	return wq->frag_buf.frags[frag].buf +
-	       ((wq->frag_sz_m1 & ix) << wq->log_stride);
+	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
 
 static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
 {
-	return wq->cc >> wq->log_sz;
+	return wq->cc >> wq->fbc.log_sz;
 }
 
 static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)

diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,6 +60,7 @@ struct mlx5_core_cq {
 	} tasklet_ctx;
 	int			reset_notify_added;
 	struct list_head	reset_notify;
+	struct mlx5_eq		*eq;
 };
 
@@ -171,8 +172,17 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
 }
 
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
+{
+	refcount_inc(&cq->refcount);
+}
+
+static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
+{
+	if (refcount_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+}
+
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			u32 *in, int inlen);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);

diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -345,13 +345,6 @@ struct mlx5_buf_list {
 	dma_addr_t		map;
 };
 
-struct mlx5_buf {
-	struct mlx5_buf_list	direct;
-	int			npages;
-	int			size;
-	u8			page_shift;
-};
-
 struct mlx5_frag_buf {
 	struct mlx5_buf_list	*frags;
 	int			npages;
@@ -359,6 +352,15 @@ struct mlx5_frag_buf {
 	u8			page_shift;
 };
 
+struct mlx5_frag_buf_ctrl {
+	struct mlx5_frag_buf	frag_buf;
+	u32			sz_m1;
+	u32			frag_sz_m1;
+	u8			log_sz;
+	u8			log_stride;
+	u8			log_frag_strides;
+};
+
 struct mlx5_eq_tasklet {
 	struct list_head	list;
 	struct list_head	process_list;
@@ -375,11 +377,18 @@ struct mlx5_eq_pagefault {
 	mempool_t		*pool;
 };
 
+struct mlx5_cq_table {
+	/* protect radix tree */
+	spinlock_t		lock;
+	struct radix_tree_root	tree;
+};
+
 struct mlx5_eq {
 	struct mlx5_core_dev	*dev;
+	struct mlx5_cq_table	cq_table;
 	__be32 __iomem	       *doorbell;
 	u32			cons_index;
-	struct mlx5_buf		buf;
+	struct mlx5_frag_buf	buf;
 	int			size;
 	unsigned int		irqn;
 	u8			eqn;
@@ -526,13 +535,6 @@ struct mlx5_core_health {
 	struct delayed_work		recover_work;
 };
 
-struct mlx5_cq_table {
-	/* protect radix tree
-	 */
-	spinlock_t		lock;
-	struct radix_tree_root	tree;
-};
-
 struct mlx5_qp_table {
 	/* protect radix tree
 	 */
@@ -654,10 +656,6 @@ struct mlx5_priv {
 	struct dentry	       *cmdif_debugfs;
 	/* end: qp staff */
 
-	/* start: cq staff */
-	struct mlx5_cq_table	cq_table;
-	/* end: cq staff */
-
 	/* start: mkey staff */
 	struct mlx5_mkey_table	mkey_table;
 	/* end: mkey staff */
@@ -936,9 +934,9 @@ struct mlx5_hca_vport_context {
 	bool			grh_required;
 };
 
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
 {
-	return buf->direct.buf + offset;
+	return buf->frags->buf + offset;
 }
 
 #define STRUCT_FIELD(header, field) \
@@ -977,6 +975,25 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }
 
+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+					      void *cqc)
+{
+	fbc->log_stride	= 6 + MLX5_GET(cqc, cqc, cqe_sz);
+	fbc->log_sz	= MLX5_GET(cqc, cqc, log_cq_size);
+	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
+	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
+	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
+}
+
+static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
+					  u32 ix)
+{
+	unsigned int frag = (ix >> fbc->log_frag_strides);
+
+	return fbc->frag_buf.frags[frag].buf +
+	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -1002,9 +1019,10 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+			struct mlx5_frag_buf *buf, int node);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+		   int size, struct mlx5_frag_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 			     struct mlx5_frag_buf *buf, int node);
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1049,22 +1067,12 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
-		       int nent, u64 mask, const char *name,
-		       enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 		    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1076,14 +1084,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 		 int size_in, void *data_out, int size_out,
 		 u16 reg_num, int arg, int write);
 
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-		       u32 *out, int outlen);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
 		       int node);