hw/rdma: Modify create/destroy QP to support SRQ
Modify create/destroy QP to support shared receive queue and rearrange
the destroy_qp() code to avoid touching the QP after calling
rdma_rm_dealloc_qp().

Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Message-Id: <20190403113343.26384-4-kamalheib1@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
commit 8b42cfab82
parent cdc84058bc
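For context, the sketch below (illustration only, not code from this commit; the helper name create_qp_with_srq and the capacity values are invented) shows what an SRQ-backed QP means at the libibverbs level: the SRQ is created separately and referenced through ibv_qp_init_attr.srq, so the QP owns no private receive queue. The backend change below does the same on the host side, filling attr.srq only when the guest created the QP against a shared receive queue.

#include <infiniband/verbs.h>

/* Hypothetical helper, for illustration only: create an RC QP whose
 * receive work requests are consumed from a shared receive queue. */
static struct ibv_qp *create_qp_with_srq(struct ibv_pd *pd, struct ibv_cq *scq,
                                         struct ibv_cq *rcq, struct ibv_srq *srq)
{
    struct ibv_qp_init_attr attr = {
        .qp_type = IBV_QPT_RC,
        .send_cq = scq,
        .recv_cq = rcq,
        .srq     = srq,          /* receive WRs come from the SRQ, not the QP */
        .cap     = {
            .max_send_wr  = 64,  /* arbitrary example capacities */
            .max_send_sge = 1,
            /* max_recv_wr/max_recv_sge are not used when an SRQ is attached */
        },
    };

    return ibv_create_qp(pd, &attr);   /* NULL on failure */
}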
hw/rdma/rdma_backend.c
@@ -794,9 +794,9 @@ void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
 
 int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                            RdmaBackendPD *pd, RdmaBackendCQ *scq,
-                           RdmaBackendCQ *rcq, uint32_t max_send_wr,
-                           uint32_t max_recv_wr, uint32_t max_send_sge,
-                           uint32_t max_recv_sge)
+                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
+                           uint32_t max_send_wr, uint32_t max_recv_wr,
+                           uint32_t max_send_sge, uint32_t max_recv_sge)
 {
     struct ibv_qp_init_attr attr = {};
 
@@ -824,6 +824,9 @@ int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
     attr.cap.max_recv_wr = max_recv_wr;
     attr.cap.max_send_sge = max_send_sge;
     attr.cap.max_recv_sge = max_recv_sge;
+    if (srq) {
+        attr.srq = srq->ibsrq;
+    }
 
     qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
     if (!qp->ibqp) {
hw/rdma/rdma_backend.h
@@ -89,9 +89,9 @@ void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq);
 
 int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                            RdmaBackendPD *pd, RdmaBackendCQ *scq,
-                           RdmaBackendCQ *rcq, uint32_t max_send_wr,
-                           uint32_t max_recv_wr, uint32_t max_send_sge,
-                           uint32_t max_recv_sge);
+                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
+                           uint32_t max_send_wr, uint32_t max_recv_wr,
+                           uint32_t max_send_sge, uint32_t max_recv_sge);
 int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                                uint8_t qp_type, uint32_t qkey);
 int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
hw/rdma/rdma_rm.c
@@ -386,12 +386,14 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                      uint8_t qp_type, uint32_t max_send_wr,
                      uint32_t max_send_sge, uint32_t send_cq_handle,
                      uint32_t max_recv_wr, uint32_t max_recv_sge,
-                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn)
+                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
+                     uint8_t is_srq, uint32_t srq_handle)
 {
     int rc;
     RdmaRmQP *qp;
     RdmaRmCQ *scq, *rcq;
     RdmaRmPD *pd;
+    RdmaRmSRQ *srq = NULL;
     uint32_t rm_qpn;
 
     pd = rdma_rm_get_pd(dev_res, pd_handle);
@@ -408,6 +410,16 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
         return -EINVAL;
     }
 
+    if (is_srq) {
+        srq = rdma_rm_get_srq(dev_res, srq_handle);
+        if (!srq) {
+            rdma_error_report("Invalid srqn %d", srq_handle);
+            return -EINVAL;
+        }
+
+        srq->recv_cq_handle = recv_cq_handle;
+    }
+
     if (qp_type == IBV_QPT_GSI) {
         scq->notify = CNT_SET;
         rcq->notify = CNT_SET;
@@ -424,10 +436,14 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
     qp->send_cq_handle = send_cq_handle;
     qp->recv_cq_handle = recv_cq_handle;
     qp->opaque = opaque;
+    qp->is_srq = is_srq;
 
     rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
-                                &scq->backend_cq, &rcq->backend_cq, max_send_wr,
-                                max_recv_wr, max_send_sge, max_recv_sge);
+                                &scq->backend_cq, &rcq->backend_cq,
+                                is_srq ? &srq->backend_srq : NULL,
+                                max_send_wr, max_recv_wr, max_send_sge,
+                                max_recv_sge);
+
     if (rc) {
         rc = -EIO;
         goto out_dealloc_qp;
hw/rdma/rdma_rm.h
@@ -53,7 +53,8 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                      uint8_t qp_type, uint32_t max_send_wr,
                      uint32_t max_send_sge, uint32_t send_cq_handle,
                      uint32_t max_recv_wr, uint32_t max_recv_sge,
-                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn);
+                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
+                     uint8_t is_srq, uint32_t srq_handle);
 RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn);
 int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                       uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,
hw/rdma/rdma_rm_defs.h
@@ -88,6 +88,7 @@ typedef struct RdmaRmQP {
     uint32_t send_cq_handle;
     uint32_t recv_cq_handle;
     enum ibv_qp_state qp_state;
+    uint8_t is_srq;
 } RdmaRmQP;
 
 typedef struct RdmaRmSRQ {
hw/rdma/vmw/pvrdma_cmd.c
@@ -357,7 +357,7 @@ static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
 static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                            PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                            uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
-                           uint32_t rpages)
+                           uint32_t rpages, uint8_t is_srq)
 {
     uint64_t *dir = NULL, *tbl = NULL;
     PvrdmaRing *sr, *rr;
@@ -365,9 +365,14 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
     char ring_name[MAX_RING_NAME_SZ];
     uint32_t wqe_sz;
 
-    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES
-        || !rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES) {
-        rdma_error_report("Got invalid page count for QP ring: %d, %d", spages,
-                          rpages);
+    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
+        rdma_error_report("Got invalid send page count for QP ring: %d",
+                          spages);
+        return rc;
+    }
+
+    if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
+        rdma_error_report("Got invalid recv page count for QP ring: %d",
+                          rpages);
         return rc;
     }
@@ -384,8 +389,12 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
         goto out;
     }
 
-    sr = g_malloc(2 * sizeof(*rr));
-    rr = &sr[1];
+    if (!is_srq) {
+        sr = g_malloc(2 * sizeof(*rr));
+        rr = &sr[1];
+    } else {
+        sr = g_malloc(sizeof(*sr));
+    }
 
     *rings = sr;
 
@@ -407,15 +416,18 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
         goto out_unmap_ring_state;
     }
 
-    /* Create recv ring */
-    rr->ring_state = &sr->ring_state[1];
-    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
-                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
-    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
-    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
-                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
-    if (rc) {
-        goto out_free_sr;
+    if (!is_srq) {
+        /* Create recv ring */
+        rr->ring_state = &sr->ring_state[1];
+        wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
+                          sizeof(struct pvrdma_sge) * rmax_sge - 1);
+        sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
+        rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
+                              rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
+                              rpages);
+        if (rc) {
+            goto out_free_sr;
+        }
     }
 
     goto out;
@@ -436,10 +448,12 @@ out:
     return rc;
 }
 
-static void destroy_qp_rings(PvrdmaRing *ring)
+static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
 {
     pvrdma_ring_free(&ring[0]);
-    pvrdma_ring_free(&ring[1]);
+    if (!is_srq) {
+        pvrdma_ring_free(&ring[1]);
+    }
 
     rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
     g_free(ring);
@@ -458,7 +472,7 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
     rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                          cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                          cmd->max_recv_wr, cmd->max_recv_sge,
-                         cmd->total_chunks - cmd->send_chunks - 1);
+                         cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
     if (rc) {
         return rc;
     }
@@ -467,9 +481,9 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                           cmd->max_send_wr, cmd->max_send_sge,
                           cmd->send_cq_handle, cmd->max_recv_wr,
                           cmd->max_recv_sge, cmd->recv_cq_handle, rings,
-                          &resp->qpn);
+                          &resp->qpn, cmd->is_srq, cmd->srq_handle);
     if (rc) {
-        destroy_qp_rings(rings);
+        destroy_qp_rings(rings, cmd->is_srq);
         return rc;
     }
 
@@ -531,10 +545,9 @@ static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
         return -EINVAL;
    }
 
-    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
-
     ring = (PvrdmaRing *)qp->opaque;
-    destroy_qp_rings(ring);
+    destroy_qp_rings(ring, qp->is_srq);
+    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
 
     return 0;
 }