migration: convert rdma backend to accept MigrateAddress
The RDMA transport backend for the 'migrate'/'migrate-incoming' QAPI commands now accepts the new MigrateAddress wire protocol. This is achieved by parsing the 'uri' string and storing the migration parameters required for the RDMA connection in a well-defined InetSocketAddress struct.

Suggested-by: Aravind Retnakaran <aravind.retnakaran@nutanix.com>
Signed-off-by: Het Gala <het.gala@nutanix.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231023182053.8711-7-farosas@suse.de>
commit 3fa9642ff7 (parent 34dfc5e407)
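The commit message says the 'uri' string is parsed and the RDMA connection parameters are stored in an InetSocketAddress. The following is a rough sketch of that idea, not code from this patch: the helper name uri_to_rdma_inet_addr() is hypothetical and used only for illustration, while strstart(), inet_parse() and qapi_free_InetSocketAddress() are existing QEMU helpers. In the real series the conversion is performed by the MigrateAddress parsing introduced in an earlier patch.

/*
 * Hedged sketch: turn an "rdma:host:port" migration URI into the
 * InetSocketAddress that the RDMA backend now expects.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"    /* strstart() */
#include "qemu/sockets.h"   /* InetSocketAddress, inet_parse() */
#include "qapi/error.h"

static InetSocketAddress *uri_to_rdma_inet_addr(const char *uri, Error **errp)
{
    const char *host_port;
    InetSocketAddress *isock;

    /* Strip the "rdma:" prefix, leaving a plain "host:port" string. */
    if (!strstart(uri, "rdma:", &host_port)) {
        error_setg(errp, "'%s' is not an rdma: migration URI", uri);
        return NULL;
    }

    isock = g_new0(InetSocketAddress, 1);
    /* inet_parse() fills isock->host and isock->port; 0 on success. */
    if (inet_parse(isock, host_port, errp) < 0) {
        qapi_free_InetSocketAddress(isock);
        return NULL;
    }
    return isock;
}

An address built this way is what the reworked rdma_start_incoming_migration()/rdma_start_outgoing_migration() in the diff below consume, instead of a raw "host:port" string.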
migration/migration.c

@@ -534,7 +534,7 @@ static void qemu_start_incoming_migration(const char *uri, Error **errp)
             fd_start_incoming_migration(saddr->u.fd.str, errp);
         }
 #ifdef CONFIG_RDMA
-    } else if (strstart(uri, "rdma:", &p)) {
+    } else if (channel->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
         if (migrate_compress()) {
             error_setg(errp, "RDMA and compression can't be used together");
             return;
@@ -547,7 +547,7 @@ static void qemu_start_incoming_migration(const char *uri, Error **errp)
             error_setg(errp, "RDMA and multifd can't be used together");
             return;
         }
-        rdma_start_incoming_migration(p, errp);
+        rdma_start_incoming_migration(&channel->u.rdma, errp);
 #endif
     } else if (strstart(uri, "exec:", &p)) {
         exec_start_incoming_migration(p, errp);
@@ -1935,8 +1935,8 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
             fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err);
         }
 #ifdef CONFIG_RDMA
-    } else if (strstart(uri, "rdma:", &p)) {
-        rdma_start_outgoing_migration(s, p, &local_err);
+    } else if (channel->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
+        rdma_start_outgoing_migration(s, &channel->u.rdma, &local_err);
 #endif
     } else if (strstart(uri, "exec:", &p)) {
         exec_start_outgoing_migration(s, p, &local_err);
migration/rdma.c

@@ -289,7 +289,6 @@ typedef struct RDMALocalBlocks {
 typedef struct RDMAContext {
     char *host;
     int port;
-    char *host_port;

     RDMAWorkRequestData wr_data[RDMA_WRID_MAX];
@@ -2431,9 +2430,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
         rdma->channel = NULL;
     }
     g_free(rdma->host);
-    g_free(rdma->host_port);
     rdma->host = NULL;
-    rdma->host_port = NULL;
 }
@@ -2723,28 +2720,16 @@ static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
     rdma_return_path->is_return_path = true;
 }

-static RDMAContext *qemu_rdma_data_init(const char *host_port, Error **errp)
+static RDMAContext *qemu_rdma_data_init(InetSocketAddress *saddr, Error **errp)
 {
     RDMAContext *rdma = NULL;
-    InetSocketAddress *addr;

     rdma = g_new0(RDMAContext, 1);
     rdma->current_index = -1;
     rdma->current_chunk = -1;

-    addr = g_new(InetSocketAddress, 1);
-    if (!inet_parse(addr, host_port, NULL)) {
-        rdma->port = atoi(addr->port);
-        rdma->host = g_strdup(addr->host);
-        rdma->host_port = g_strdup(host_port);
-    } else {
-        error_setg(errp, "RDMA ERROR: bad RDMA migration address '%s'",
-                   host_port);
-        g_free(rdma);
-        rdma = NULL;
-    }
-
-    qapi_free_InetSocketAddress(addr);
+    rdma->host = g_strdup(saddr->host);
+    rdma->port = atoi(saddr->port);
     return rdma;
 }
@@ -3353,6 +3338,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
         .private_data_len = sizeof(cap),
     };
     RDMAContext *rdma_return_path = NULL;
+    g_autoptr(InetSocketAddress) isock = g_new0(InetSocketAddress, 1);
     struct rdma_cm_event *cm_event;
     struct ibv_context *verbs;
     int ret;
@@ -3367,13 +3353,16 @@ static int qemu_rdma_accept(RDMAContext *rdma)
         goto err_rdma_dest_wait;
     }

+    isock->host = rdma->host;
+    isock->port = g_strdup_printf("%d", rdma->port);
+
     /*
      * initialize the RDMAContext for return path for postcopy after first
      * connection request reached.
      */
     if ((migrate_postcopy() || migrate_return_path())
         && !rdma->is_return_path) {
-        rdma_return_path = qemu_rdma_data_init(rdma->host_port, NULL);
+        rdma_return_path = qemu_rdma_data_init(isock, NULL);
         if (rdma_return_path == NULL) {
             rdma_ack_cm_event(cm_event);
             goto err_rdma_dest_wait;
@@ -4074,7 +4063,8 @@ static void rdma_accept_incoming_migration(void *opaque)
     }
 }

-void rdma_start_incoming_migration(const char *host_port, Error **errp)
+void rdma_start_incoming_migration(InetSocketAddress *host_port,
+                                   Error **errp)
 {
     MigrationState *s = migrate_get_current();
     int ret;
@@ -4118,13 +4108,12 @@ cleanup_rdma:
 err:
     if (rdma) {
         g_free(rdma->host);
-        g_free(rdma->host_port);
     }
     g_free(rdma);
 }

 void rdma_start_outgoing_migration(void *opaque,
-                                   const char *host_port, Error **errp)
+                                   InetSocketAddress *host_port, Error **errp)
 {
     MigrationState *s = opaque;
     RDMAContext *rdma_return_path = NULL;
migration/rdma.h

@@ -14,15 +14,17 @@
  *
  */

+#include "qemu/sockets.h"
+
 #ifndef QEMU_MIGRATION_RDMA_H
 #define QEMU_MIGRATION_RDMA_H

 #include "exec/memory.h"

-void rdma_start_outgoing_migration(void *opaque, const char *host_port,
+void rdma_start_outgoing_migration(void *opaque, InetSocketAddress *host_port,
                                    Error **errp);

-void rdma_start_incoming_migration(const char *host_port, Error **errp);
+void rdma_start_incoming_migration(InetSocketAddress *host_port, Error **errp);

 /*
  * Constants used by rdma return codes