hw/rdma: Introduce protected qlist

To make the code more readable, move the handling of the protected
list to rdma_utils.

Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Message-Id: <1552300155-25216-3-git-send-email-yuval.shaia@oracle.com>
Reviewed-by: Kamal Heib <kamalheib1@gmail.com>
Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
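
For reference, a minimal caller-side sketch of the helpers this patch adds; the
list variable and the id value below are illustrative, not taken from the patch:

    RdmaProtectedQList ids;                          /* illustrative list, not from the patch */
    int64_t id;

    rdma_protected_qlist_init(&ids);                 /* init the mutex and the underlying QList */
    rdma_protected_qlist_append_int64(&ids, 0x1234); /* append while holding the lock */

    id = rdma_protected_qlist_pop_int64(&ids);       /* pop while holding the lock */
    if (id == -ENOENT) {
        /* list was empty */
    }

    rdma_protected_qlist_destroy(&ids);              /* free the QList and destroy the mutex */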

--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c

@@ -527,9 +527,7 @@ static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
     bctx->up_ctx = ctx;
     bctx->sge = *sge;
 
-    qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
-    qlist_append_int(backend_dev->recv_mads_list.list, bctx_id);
-    qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
+    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);
 
     return 0;
 }
@@ -913,23 +911,19 @@ static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
 static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                      RdmaCmMuxMsg *msg)
 {
-    QObject *o_ctx_id;
     unsigned long cqe_ctx_id;
     BackendCtx *bctx;
     char *mad;
 
     trace_mad_message("recv", msg->umad.mad, msg->umad_len);
 
-    qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
-    o_ctx_id = qlist_pop(backend_dev->recv_mads_list.list);
-    qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
-    if (!o_ctx_id) {
+    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
+    if (cqe_ctx_id == -ENOENT) {
         rdma_warn_report("No more free MADs buffers, waiting for a while");
         sleep(THR_POLL_TO);
         return;
     }
 
-    cqe_ctx_id = qnum_get_uint(qobject_to(QNum, o_ctx_id));
     bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
     if (unlikely(!bctx)) {
         rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
@@ -994,8 +988,7 @@ static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
         return -EIO;
     }
 
-    qemu_mutex_init(&backend_dev->recv_mads_list.lock);
-    backend_dev->recv_mads_list.list = qlist_new();
+    rdma_protected_qlist_init(&backend_dev->recv_mads_list);
 
     enable_rdmacm_mux_async(backend_dev);
 
@@ -1010,10 +1003,7 @@ static void mad_fini(RdmaBackendDev *backend_dev)
 {
     disable_rdmacm_mux_async(backend_dev);
     qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
-    if (backend_dev->recv_mads_list.list) {
-        qlist_destroy_obj(QOBJECT(backend_dev->recv_mads_list.list));
-        qemu_mutex_destroy(&backend_dev->recv_mads_list.lock);
-    }
+    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
 }
 
 int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,

--- a/hw/rdma/rdma_backend_defs.h
+++ b/hw/rdma/rdma_backend_defs.h

@@ -20,6 +20,7 @@
 #include "chardev/char-fe.h"
 #include <infiniband/verbs.h>
 #include "contrib/rdmacm-mux/rdmacm-mux.h"
+#include "rdma_utils.h"
 
 typedef struct RdmaDeviceResources RdmaDeviceResources;
 
@@ -30,11 +31,6 @@ typedef struct RdmaBackendThread {
     bool is_running; /* Set by the thread to report its status */
 } RdmaBackendThread;
 
-typedef struct RecvMadList {
-    QemuMutex lock;
-    QList *list;
-} RecvMadList;
-
 typedef struct RdmaCmMux {
     CharBackend *chr_be;
     int can_receive;
@@ -48,7 +44,7 @@ typedef struct RdmaBackendDev {
     struct ibv_context *context;
     struct ibv_comp_channel *channel;
     uint8_t port_num;
-    RecvMadList recv_mads_list;
+    RdmaProtectedQList recv_mads_list;
     RdmaCmMux rdmacm_mux;
 } RdmaBackendDev;
 

--- a/hw/rdma/rdma_utils.c
+++ b/hw/rdma/rdma_utils.c

@@ -14,6 +14,8 @@
  */
 
 #include "qemu/osdep.h"
+#include "qapi/qmp/qlist.h"
+#include "qapi/qmp/qnum.h"
 #include "trace.h"
 #include "rdma_utils.h"
 
@@ -51,3 +53,40 @@ void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
         pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
     }
 }
+
+void rdma_protected_qlist_init(RdmaProtectedQList *list)
+{
+    qemu_mutex_init(&list->lock);
+    list->list = qlist_new();
+}
+
+void rdma_protected_qlist_destroy(RdmaProtectedQList *list)
+{
+    if (list->list) {
+        qlist_destroy_obj(QOBJECT(list->list));
+        qemu_mutex_destroy(&list->lock);
+        list->list = NULL;
+    }
+}
+
+void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value)
+{
+    qemu_mutex_lock(&list->lock);
+    qlist_append_int(list->list, value);
+    qemu_mutex_unlock(&list->lock);
+}
+
+int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list)
+{
+    QObject *obj;
+
+    qemu_mutex_lock(&list->lock);
+    obj = qlist_pop(list->list);
+    qemu_mutex_unlock(&list->lock);
+
+    if (!obj) {
+        return -ENOENT;
+    }
+
+    return qnum_get_uint(qobject_to(QNum, obj));
+}

--- a/hw/rdma/rdma_utils.h
+++ b/hw/rdma/rdma_utils.h

@@ -29,8 +29,17 @@
 #define rdma_info_report(fmt, ...) \
     info_report("%s: " fmt, "rdma", ## __VA_ARGS__)
 
+typedef struct RdmaProtectedQList {
+    QemuMutex lock;
+    QList *list;
+} RdmaProtectedQList;
+
 void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen);
 void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len);
+void rdma_protected_qlist_init(RdmaProtectedQList *list);
+void rdma_protected_qlist_destroy(RdmaProtectedQList *list);
+void rdma_protected_qlist_append_int64(RdmaProtectedQList *list, int64_t value);
+int64_t rdma_protected_qlist_pop_int64(RdmaProtectedQList *list);
 
 static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr)
 {