/*
* Serving QEMU block devices via NBD
*
* Copyright (c) 2012 Red Hat, Inc.
*
* Author: Paolo Bonzini <pbonzini@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* later. See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "hw/block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-block-export.h"
#include "qapi/qapi-commands-block-export.h"
#include "block/nbd.h"
#include "io/channel-socket.h"
#include "io/net-listener.h"
typedef struct NBDServerData {
QIONetListener *listener;
QCryptoTLSCreds *tlscreds;
char *tlsauthz;
uint32_t max_connections;
uint32_t connections;
} NBDServerData;
static NBDServerData *nbd_server;
static bool is_qemu_nbd;
static void nbd_update_server_watch(NBDServerData *s);
void nbd_server_is_qemu_nbd(bool value)
{
is_qemu_nbd = value;
}
bool nbd_server_is_running(void)
{
return nbd_server || is_qemu_nbd;
}
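
/*
 * close_fn callback, invoked when a client connection goes away. Its
 * second argument reports whether the client got through NBD
 * negotiation; the built-in server keeps running either way, so the
 * value is ignored here (qemu-nbd uses it to decide when a transient
 * server may exit).
 */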
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
{
nbd_client_put(client);
assert(nbd_server->connections > 0);
nbd_server->connections--;
nbd_update_server_watch(nbd_server);
}
static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
gpointer opaque)
{
nbd_server->connections++;
nbd_update_server_watch(nbd_server);
qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
nbd_client_new(cioc, nbd_server->tlscreds, nbd_server->tlsauthz,
nbd_blockdev_client_closed);
}
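
/*
 * Arm or disarm the accept handler: stop accepting new clients once
 * max_connections is reached (0 means unlimited) and resume accepting
 * when a client disconnects.
 */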
static void nbd_update_server_watch(NBDServerData *s)
{
if (!s->max_connections || s->connections < s->max_connections) {
qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL);
} else {
qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
}
}
static void nbd_server_free(NBDServerData *server)
{
if (!server) {
return;
}
qio_net_listener_disconnect(server->listener);
object_unref(OBJECT(server->listener));
if (server->tlscreds) {
object_unref(OBJECT(server->tlscreds));
}
g_free(server->tlsauthz);
g_free(server);
}
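
/*
 * Look up a TLS credentials object by QOM id, verify that it is
 * usable as a server endpoint, and take a reference. Such an object
 * is typically created up front; a sketch of the QMP command, with a
 * placeholder certificate directory:
 *
 *   { "execute": "object-add",
 *     "arguments": { "qom-type": "tls-creds-x509", "id": "tls0",
 *                    "dir": "/path/to/certs", "endpoint": "server" } }
 */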
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
Object *obj;
QCryptoTLSCreds *creds;
obj = object_resolve_path_component(
object_get_objects_root(), id);
if (!obj) {
error_setg(errp, "No TLS credentials with id '%s'",
id);
return NULL;
}
creds = (QCryptoTLSCreds *)
object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
if (!creds) {
error_setg(errp, "Object with id '%s' is not TLS credentials",
id);
return NULL;
}
if (!qcrypto_tls_creds_check_endpoint(creds,
QCRYPTO_TLS_CREDS_ENDPOINT_SERVER,
errp)) {
return NULL;
}
object_ref(obj);
return creds;
}
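
/*
 * Shared worker for starting the server: allocate the state, open the
 * listening socket, then resolve and validate the optional TLS
 * settings before accepting clients.
 */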
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
const char *tls_authz, uint32_t max_connections,
Error **errp)
{
if (nbd_server) {
error_setg(errp, "NBD server already running");
return;
}
nbd_server = g_new0(NBDServerData, 1);
nbd_server->max_connections = max_connections;
nbd_server->listener = qio_net_listener_new();
qio_net_listener_set_name(nbd_server->listener,
"nbd-listener");
/*
* Because this server is persistent, a backlog of SOMAXCONN is
* better than trying to size it to max_connections.
*/
if (qio_net_listener_open_sync(nbd_server->listener, addr, SOMAXCONN,
errp) < 0) {
goto error;
}
if (tls_creds) {
nbd_server->tlscreds = nbd_get_tls_creds(tls_creds, errp);
if (!nbd_server->tlscreds) {
goto error;
}
/* TODO SOCKET_ADDRESS_TYPE_FD where fd has AF_INET or AF_INET6 */
if (addr->type != SOCKET_ADDRESS_TYPE_INET) {
error_setg(errp, "TLS is only supported with IPv4/IPv6");
goto error;
}
}
nbd_server->tlsauthz = g_strdup(tls_authz);
nbd_update_server_watch(nbd_server);
return;
error:
nbd_server_free(nbd_server);
nbd_server = NULL;
}
void nbd_server_start_options(NbdServerOptions *arg, Error **errp)
{
nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz,
arg->max_connections, errp);
}
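
/*
 * QMP entry point. An illustrative invocation (host and port are
 * examples only):
 *
 *   { "execute": "nbd-server-start",
 *     "arguments": { "addr": { "type": "inet",
 *                              "data": { "host": "127.0.0.1",
 *                                        "port": "10809" } } } }
 */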
void qmp_nbd_server_start(SocketAddressLegacy *addr,
bool has_tls_creds, const char *tls_creds,
bool has_tls_authz, const char *tls_authz,
bool has_max_connections, uint32_t max_connections,
Error **errp)
{
SocketAddress *addr_flat = socket_address_flatten(addr);
nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
qapi_free_SocketAddress(addr_flat);
}
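
/*
 * A sketch of the corresponding QMP command (the device name is an
 * example):
 *
 *   { "execute": "nbd-server-add",
 *     "arguments": { "device": "drive0", "writable": true } }
 */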
void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
{
BlockExport *export;
BlockDriverState *bs;
BlockBackend *on_eject_blk;
BlockExportOptions *export_opts;
bs = bdrv_lookup_bs(arg->device, arg->device, errp);
if (!bs) {
return;
}
/*
* block-export-add would default to the node-name, but we may have to use
* the device name as a default here for compatibility.
*/
if (!arg->has_name) {
arg->has_name = true;
arg->name = g_strdup(arg->device);
}
export_opts = g_new(BlockExportOptions, 1);
*export_opts = (BlockExportOptions) {
.type = BLOCK_EXPORT_TYPE_NBD,
.id = g_strdup(arg->name),
.node_name = g_strdup(bdrv_get_node_name(bs)),
.has_writable = arg->has_writable,
.writable = arg->writable,
};
QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd,
qapi_NbdServerAddOptions_base(arg));
if (arg->has_bitmap) {
export_opts->u.nbd.has_bitmaps = true;
QAPI_LIST_PREPEND(export_opts->u.nbd.bitmaps, g_strdup(arg->bitmap));
}
/*
* nbd-server-add doesn't complain when a read-only device should be
* exported as writable, but simply downgrades it. This is an error with
* block-export-add.
*/
if (bdrv_is_read_only(bs)) {
export_opts->has_writable = true;
export_opts->writable = false;
}
export = blk_exp_add(export_opts, errp);
if (!export) {
goto fail;
}
/*
* nbd-server-add removes the export when the named BlockBackend used for
* @device goes away.
*/
on_eject_blk = blk_by_name(arg->device);
if (on_eject_blk) {
nbd_export_set_on_eject_blk(export, on_eject_blk);
}
fail:
qapi_free_BlockExportOptions(export_opts);
}
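
/*
 * Illustrative QMP usage (the export name is an example; "mode"
 * defaults to "safe" when omitted):
 *
 *   { "execute": "nbd-server-remove",
 *     "arguments": { "name": "drive0", "mode": "hard" } }
 */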
void qmp_nbd_server_remove(const char *name,
bool has_mode, BlockExportRemoveMode mode,
Error **errp)
{
BlockExport *exp;
exp = blk_exp_find(name);
if (exp && exp->drv->type != BLOCK_EXPORT_TYPE_NBD) {
error_setg(errp, "Block export '%s' is not an NBD export", name);
return;
}
qmp_block_export_del(name, has_mode, mode, errp);
}
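
/*
 * Stopping the server closes all NBD exports first, then tears down
 * the listener:
 *
 *   { "execute": "nbd-server-stop" }
 */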
void qmp_nbd_server_stop(Error **errp)
{
if (!nbd_server) {
error_setg(errp, "NBD server not running");
return;
}
blk_exp_close_all_type(BLOCK_EXPORT_TYPE_NBD);
nbd_server_free(nbd_server);
nbd_server = NULL;
}