migration/rdma: Convert qemu_rdma_alloc_pd_cq() to Error

Functions that use an Error **errp parameter to return errors should
not also report them to the user, because reporting is the caller's
job.  When the caller does, the error is reported twice.  When it
doesn't (because it recovered from the error), there is no error to
report, i.e. the report is bogus.

qemu_rdma_source_init() violates this principle: it calls
error_report() via qemu_rdma_alloc_pd_cq().  I elected not to
investigate how callers handle the error, so the precise impact is
not known.

Clean this up by converting qemu_rdma_alloc_pd_cq() to Error.

The conversion loses a piece of advice on one of two failure paths:

    Your mlock() limits may be too low. Please check $ ulimit -a # and search for 'ulimit -l' in the output

Not worth retaining.

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20230928132019.2544702-45-armbru@redhat.com>

--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1052,19 +1052,19 @@ err_resolve_create_id:
 /*
  * Create protection domain and completion queues
  */
-static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
+static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma, Error **errp)
 {
     /* allocate pd */
     rdma->pd = ibv_alloc_pd(rdma->verbs);
     if (!rdma->pd) {
-        error_report("failed to allocate protection domain");
+        error_setg(errp, "failed to allocate protection domain");
         return -1;
     }
 
     /* create receive completion channel */
     rdma->recv_comp_channel = ibv_create_comp_channel(rdma->verbs);
     if (!rdma->recv_comp_channel) {
-        error_report("failed to allocate receive completion channel");
+        error_setg(errp, "failed to allocate receive completion channel");
         goto err_alloc_pd_cq;
     }
 
@@ -1074,21 +1074,21 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
     rdma->recv_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
                                   NULL, rdma->recv_comp_channel, 0);
     if (!rdma->recv_cq) {
-        error_report("failed to allocate receive completion queue");
+        error_setg(errp, "failed to allocate receive completion queue");
         goto err_alloc_pd_cq;
     }
 
     /* create send completion channel */
     rdma->send_comp_channel = ibv_create_comp_channel(rdma->verbs);
     if (!rdma->send_comp_channel) {
-        error_report("failed to allocate send completion channel");
+        error_setg(errp, "failed to allocate send completion channel");
         goto err_alloc_pd_cq;
     }
 
     rdma->send_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
                                   NULL, rdma->send_comp_channel, 0);
     if (!rdma->send_cq) {
-        error_report("failed to allocate send completion queue");
+        error_setg(errp, "failed to allocate send completion queue");
         goto err_alloc_pd_cq;
     }
 
@@ -2503,12 +2503,8 @@ static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
         goto err_rdma_source_init;
     }
 
-    ret = qemu_rdma_alloc_pd_cq(rdma);
+    ret = qemu_rdma_alloc_pd_cq(rdma, errp);
     if (ret < 0) {
-        error_setg(errp, "RDMA ERROR: "
-                   "rdma migration: error allocating pd and cq! Your mlock()"
-                   " limits may be too low. Please check $ ulimit -a # and "
-                   "search for 'ulimit -l' in the output");
         goto err_rdma_source_init;
     }
 
@@ -3482,9 +3478,9 @@ static int qemu_rdma_accept(RDMAContext *rdma)
     qemu_rdma_dump_id("dest_init", verbs);
 
-    ret = qemu_rdma_alloc_pd_cq(rdma);
+    ret = qemu_rdma_alloc_pd_cq(rdma, &err);
     if (ret < 0) {
-        error_report("rdma migration: error allocating pd and cq!");
+        error_report_err(err);
         goto err_rdma_dest_wait;
     }