/*
 * Copyright Red Hat
 * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Network Block Device Client Side
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/queue.h"
#include "trace.h"
#include "nbd-internal.h"
#include "qemu/cutils.h"

/* Definitions for opaque data types */

static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports);

/* That's all folks */

/* Basic flow for negotiation

   Server         Client
   Negotiate

   or

   Server         Client
   Negotiate #1
                  Option
   Negotiate #2

   ----

   followed by

   Server         Client
   Request
                  Response
   Request
                  Response
   ...
   ...
                  Request (type == 2)

*/
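
/*
 * For orientation (illustrative summary, derived from the code below):
 * each option request sent during the negotiation phase above is a
 * fixed 16-byte header, optionally followed by payload, with all
 * fields big-endian; see nbd_send_option_request():
 *
 *     [ 0.. 7]  magic   -- NBD_OPTS_MAGIC
 *     [ 8..11]  option  -- the NBD_OPT_* constant being negotiated
 *     [12..15]  length  -- byte count of the payload that follows
 */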

/* Send an option request.
 *
 * The request is for option @opt, with @data containing @len bytes of
 * additional payload for the request (@len may be -1 to treat @data as
 * a C string; and @data may be NULL if @len is 0).
 * Return 0 if successful, -1 with errp set if it is impossible to
 * continue. */
static int nbd_send_option_request(QIOChannel *ioc, uint32_t opt,
                                   uint32_t len, const char *data,
                                   Error **errp)
{
    ERRP_GUARD();
    NBDOption req;
    QEMU_BUILD_BUG_ON(sizeof(req) != 16);

    if (len == -1) {
        req.length = len = strlen(data);
    }
    trace_nbd_send_option_request(opt, nbd_opt_lookup(opt), len);

    stq_be_p(&req.magic, NBD_OPTS_MAGIC);
    stl_be_p(&req.option, opt);
    stl_be_p(&req.length, len);

    if (nbd_write(ioc, &req, sizeof(req), errp) < 0) {
        error_prepend(errp, "Failed to send option request header: ");
        return -1;
    }

    if (len && nbd_write(ioc, (char *) data, len, errp) < 0) {
        error_prepend(errp, "Failed to send option request data: ");
        return -1;
    }

    return 0;
}
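
/*
 * Usage sketch (illustrative only, not part of this file): a client
 * typically starts negotiating an option such as NBD_OPT_LIST with an
 * empty payload, then reads the server's reply:
 *
 *     if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
 *         return -1;    // errp is already set; connection is unusable
 *     }
 *
 * Passing len == -1 instead treats @data as a NUL-terminated string,
 * which is convenient for options whose payload is an export name.
 */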

/* Send NBD_OPT_ABORT as a courtesy to let the server know that we are
 * not going to attempt further negotiation. */
static void nbd_send_opt_abort(QIOChannel *ioc)
{
    /* Technically, a compliant server is supposed to reply to us; but
     * older servers disconnected instead. At any rate, we're allowed
     * to disconnect without waiting for the server reply, so we don't
     * even care if the request makes it to the server, let alone
     * waiting around for whether the server replies. */
    nbd_send_option_request(ioc, NBD_OPT_ABORT, 0, NULL, NULL);
}

/* Receive the header of an option reply, which should match the given
 * opt.  Read through the length field, but NOT the length bytes of
 * payload. Return 0 if successful, -1 with errp set if it is
 * impossible to continue. */
static int nbd_receive_option_reply(QIOChannel *ioc, uint32_t opt,
                                    NBDOptionReply *reply, Error **errp)
{
    QEMU_BUILD_BUG_ON(sizeof(*reply) != 20);
    if (nbd_read(ioc, reply, sizeof(*reply), "option reply", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    reply->magic = be64_to_cpu(reply->magic);
    reply->option = be32_to_cpu(reply->option);
    reply->type = be32_to_cpu(reply->type);
    reply->length = be32_to_cpu(reply->length);

    trace_nbd_receive_option_reply(reply->option, nbd_opt_lookup(reply->option),
                                   reply->type, nbd_rep_lookup(reply->type),
                                   reply->length);

    if (reply->magic != NBD_REP_MAGIC) {
        error_setg(errp, "Unexpected option reply magic");
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (reply->option != opt) {
        error_setg(errp, "Unexpected option type %u (%s), expected %u (%s)",
                   reply->option, nbd_opt_lookup(reply->option),
                   opt, nbd_opt_lookup(opt));
        nbd_send_opt_abort(ioc);
        return -1;
    }
    return 0;
}
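
/*
 * For reference, the fixed 20-byte option reply header parsed above is
 * laid out on the wire as (all fields big-endian):
 *
 *     [ 0.. 7]  magic   -- NBD_REP_MAGIC (0x3e889045565a9)
 *     [ 8..11]  option  -- the option this is a reply to
 *     [12..15]  type    -- NBD_REP_ACK, NBD_REP_SERVER, or NBD_REP_ERR_*
 *     [16..19]  length  -- byte count of any payload that follows
 */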

/*
 * If reply represents success, return 1 without further action.  If
 * reply represents an error, consume the optional payload of the
 * packet on ioc.  Then return 0 for unsupported (so the client can
 * fall back to other approaches), where @strict determines if only
 * ERR_UNSUP or all errors fit that category, or -1 with errp set for
 * other errors.
 */
static int nbd_handle_reply_err(QIOChannel *ioc, NBDOptionReply *reply,
                                bool strict, Error **errp)
{
    ERRP_GUARD();
    g_autofree char *msg = NULL;

    if (!(reply->type & (1 << 31))) {
        return 1;
    }

    if (reply->length) {
        if (reply->length > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "server error %" PRIu32
                       " (%s) message is too long",
                       reply->type, nbd_rep_lookup(reply->type));
            goto err;
        }
        msg = g_malloc(reply->length + 1);
        if (nbd_read(ioc, msg, reply->length, NULL, errp) < 0) {
            error_prepend(errp, "Failed to read option error %" PRIu32
                          " (%s) message: ",
                          reply->type, nbd_rep_lookup(reply->type));
            goto err;
        }
        msg[reply->length] = '\0';
        trace_nbd_server_error_msg(reply->type,
                                   nbd_reply_type_lookup(reply->type), msg);
    }

    if (reply->type == NBD_REP_ERR_UNSUP || !strict) {
        trace_nbd_reply_err_ignored(reply->option,
                                    nbd_opt_lookup(reply->option),
                                    reply->type, nbd_rep_lookup(reply->type));
        return 0;
    }

    switch (reply->type) {
    case NBD_REP_ERR_POLICY:
        error_setg(errp, "Denied by server for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_INVALID:
        error_setg(errp, "Invalid parameters for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_PLATFORM:
        error_setg(errp, "Server lacks support for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_TLS_REQD:
        error_setg(errp, "TLS negotiation required before option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        error_append_hint(errp, "Did you forget a valid tls-creds?\n");
        break;

    case NBD_REP_ERR_UNKNOWN:
        error_setg(errp, "Requested export not available");
        break;

    case NBD_REP_ERR_SHUTDOWN:
        error_setg(errp, "Server shutting down before option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_BLOCK_SIZE_REQD:
        error_setg(errp, "Server requires INFO_BLOCK_SIZE for option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        break;

    default:
        error_setg(errp, "Unknown error code when asking for option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        break;
    }

    if (msg) {
        error_append_hint(errp, "server reported: %s\n", msg);
    }

err:
    nbd_send_opt_abort(ioc);
    return -1;
}
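
/*
 * Callers consume the three-way return value like this sketch
 * (illustrative only; it mirrors nbd_receive_list() below):
 *
 *     error = nbd_handle_reply_err(ioc, &reply, true, errp);
 *     if (error <= 0) {
 *         return error;    // 0: fall back gracefully; -1: hard failure
 *     }
 *     // error == 1: reply carries a successful response to parse
 *
 * With strict=true only NBD_REP_ERR_UNSUP maps to the soft 0 return;
 * with strict=false any server error does, which suits best-effort
 * queries that should not abort the whole connection on failure.
 */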

/* nbd_receive_list:
 * Process another portion of the NBD_OPT_LIST reply, populating any
 * name received into *@name. If @description is non-NULL, and the
 * server provided a description, that is also populated. The caller
 * must eventually call g_free() on success.
 * Returns 1 if name and description were set and iteration must continue,
 *         0 if iteration is complete (including if OPT_LIST unsupported),
 *         -1 with @errp set if an unrecoverable error occurred.
 */
static int nbd_receive_list(QIOChannel *ioc, char **name, char **description,
                            Error **errp)
{
    NBDOptionReply reply;
    uint32_t len;
    uint32_t namelen;
    g_autofree char *local_name = NULL;
    g_autofree char *local_desc = NULL;
    int error;

    if (nbd_receive_option_reply(ioc, NBD_OPT_LIST, &reply, errp) < 0) {
        return -1;
    }
    error = nbd_handle_reply_err(ioc, &reply, true, errp);
    if (error <= 0) {
        return error;
    }
    len = reply.length;

    if (reply.type == NBD_REP_ACK) {
        if (len != 0) {
            error_setg(errp, "length too long for option end");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        return 0;
    } else if (reply.type != NBD_REP_SERVER) {
        error_setg(errp, "Unexpected reply type %u (%s), expected %u (%s)",
                   reply.type, nbd_rep_lookup(reply.type),
                   NBD_REP_SERVER, nbd_rep_lookup(NBD_REP_SERVER));
        nbd_send_opt_abort(ioc);
        return -1;
    }

    if (len < sizeof(namelen) || len > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "incorrect option length %" PRIu32, len);
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (nbd_read32(ioc, &namelen, "option name length", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    len -= sizeof(namelen);
    if (len < namelen || namelen > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "incorrect name length in server's list response");
        nbd_send_opt_abort(ioc);
        return -1;
    }

    local_name = g_malloc(namelen + 1);
    if (nbd_read(ioc, local_name, namelen, "export name", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    local_name[namelen] = '\0';
    len -= namelen;
    if (len) {
        if (len > NBD_MAX_STRING_SIZE) {
            error_setg(errp, "incorrect description length in server's "
                       "list response");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        local_desc = g_malloc(len + 1);
        if (nbd_read(ioc, local_desc, len, "export description", errp) < 0) {
            nbd_send_opt_abort(ioc);
            return -1;
        }
        local_desc[len] = '\0';
    }

    trace_nbd_receive_list(local_name, local_desc ?: "");
    *name = g_steal_pointer(&local_name);
nbd/client: Refactor nbd_receive_list()
Right now, nbd_receive_list() is only called by
nbd_receive_query_exports(), which in turn is only called if the
server lacks NBD_OPT_GO but has working option negotiation, and is
merely used as a quality-of-implementation trick since servers
can't give decent errors for NBD_OPT_EXPORT_NAME. However, servers
that lack NBD_OPT_GO are becoming increasingly rare (nbdkit was a
latecomer, in Aug 2018, but qemu has been such a server since commit
f37708f6 in July 2017 and released in 2.10), so it no longer makes
sense to micro-optimize that function for performance.
Furthermore, when debugging a server's implementation, tracing the
full reply (both names and descriptions) is useful, not to mention
that upcoming patches adding 'qemu-nbd --list' will want to collect
that data. And when you consider that a server can send an export
name up to the NBD protocol length limit of 4k; but our current
NBD_MAX_NAME_SIZE is only 256, we can't trace all valid server
names without more storage, but 4k is large enough that the heap
is better than the stack for long names.
Thus, I'm changing the division of labor, with nbd_receive_list()
now always malloc'ing a result on success (the malloc is bounded
by the fact that we reject servers with a reply length larger
than 32M), and moving the comparison to 'wantname' to the caller.
There is a minor change in behavior where a server with 0 exports
(an immediate NBD_REP_ACK reply) is now no longer distinguished
from a server without LIST support (NBD_REP_ERR_UNSUP); this
information could be preserved with a complication to the calling
contract to provide a bit more information, but I didn't see the
point. After all, the worst that can happen if our guess at a
match is wrong is that the caller will get a cryptic disconnect
when NBD_OPT_EXPORT_NAME fails (which is no different from what
would happen if we had not tried LIST), while treating an empty
list as immediate failure would prevent connecting to really old
servers that really did lack LIST. Besides, NBD servers with 0
exports are rare (qemu can do it when using QMP nbd-server-start
without nbd-server-add - but qemu understands NBD_OPT_GO and
thus won't tickle this change in behavior).
Fix the spelling of foundExport to match coding standards while
in the area.
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20190117193658.16413-9-eblake@redhat.com>
2019-01-17 20:36:45 +01:00
|
|
|
if (description) {
|
2019-08-24 19:28:12 +02:00
|
|
|
*description = g_steal_pointer(&local_desc);
|
2016-10-14 20:33:13 +02:00
|
|
|
}
|
2019-08-24 19:28:12 +02:00
|
|
|
return 1;
|
2016-02-10 19:41:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
/*
 * nbd_opt_info_or_go:
 * Send option for NBD_OPT_INFO or NBD_OPT_GO and parse the reply.
 * Returns -1 if the option proves the export @info->name cannot be
 * used, 0 if the option is unsupported (fall back to NBD_OPT_LIST and
 * NBD_OPT_EXPORT_NAME in that case), and > 0 if the export is good to
 * go (with the rest of @info populated).
 */
static int nbd_opt_info_or_go(QIOChannel *ioc, uint32_t opt,
                              NBDExportInfo *info, Error **errp)
{
    ERRP_GUARD();
    NBDOptionReply reply;
    uint32_t len = strlen(info->name);
    uint16_t type;
    int error;
    char *buf;

    /* The protocol requires that the server send NBD_INFO_EXPORT with
     * a non-zero flags (at least NBD_FLAG_HAS_FLAGS must be set); so
     * flags still 0 is a witness of a broken server. */
    info->flags = 0;

    assert(opt == NBD_OPT_GO || opt == NBD_OPT_INFO);
    trace_nbd_opt_info_go_start(nbd_opt_lookup(opt), info->name);
    buf = g_malloc(4 + len + 2 + 2 * info->request_sizes + 1);
    stl_be_p(buf, len);
    memcpy(buf + 4, info->name, len);
    /* At most one request, everything else up to server */
    stw_be_p(buf + 4 + len, info->request_sizes);
    if (info->request_sizes) {
        stw_be_p(buf + 4 + len + 2, NBD_INFO_BLOCK_SIZE);
    }
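
    /*
     * On-wire layout of the payload just assembled (a sketch of the
     * fixed-newstyle NBD_OPT_GO/NBD_OPT_INFO request data; offsets
     * relative to the start of buf):
     *   [0..3]      32-bit big-endian length of the export name (len)
     *   [4..4+len)  export name, without trailing NUL
     *   next 2      16-bit big-endian count of information requests
     *   next 2      NBD_INFO_BLOCK_SIZE, present only when requested
     */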
    error = nbd_send_option_request(ioc, opt,
                                    4 + len + 2 + 2 * info->request_sizes,
                                    buf, errp);
    g_free(buf);
    if (error < 0) {
        return -1;
    }

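    /*
     * Reply loop: the server answers with a series of NBD_REP_INFO
     * replies (NBD_INFO_EXPORT is mandatory), terminated by a bare
     * NBD_REP_ACK; error replies are mapped to 0/-1 by
     * nbd_handle_reply_err() below.
     */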
    while (1) {
        if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
            return -1;
        }
        error = nbd_handle_reply_err(ioc, &reply, true, errp);
        if (error <= 0) {
            return error;
        }
        len = reply.length;

        if (reply.type == NBD_REP_ACK) {
            /*
             * Server is done sending info, and moved into transmission
             * phase for NBD_OPT_GO, but make sure it sent flags
             */
            if (len) {
                error_setg(errp, "server sent invalid NBD_REP_ACK");
                return -1;
            }
            if (!info->flags) {
                error_setg(errp, "broken server omitted NBD_INFO_EXPORT");
                return -1;
            }
            trace_nbd_opt_info_go_success(nbd_opt_lookup(opt));
            return 1;
        }
        if (reply.type != NBD_REP_INFO) {
            error_setg(errp, "unexpected reply type %u (%s), expected %u (%s)",
                       reply.type, nbd_rep_lookup(reply.type),
                       NBD_REP_INFO, nbd_rep_lookup(NBD_REP_INFO));
            nbd_send_opt_abort(ioc);
            return -1;
        }
        if (len < sizeof(type)) {
            error_setg(errp, "NBD_REP_INFO length %" PRIu32 " is too short",
                       len);
            nbd_send_opt_abort(ioc);
            return -1;
        }
        if (nbd_read16(ioc, &type, "info type", errp) < 0) {
            nbd_send_opt_abort(ioc);
            return -1;
        }
        len -= sizeof(type);
        switch (type) {
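        /*
         * NBD_INFO_EXPORT payload: a 64-bit export size plus 16-bit
         * transmission flags - the same data NBD_OPT_EXPORT_NAME
         * would have provided.
         */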
        case NBD_INFO_EXPORT:
            if (len != sizeof(info->size) + sizeof(info->flags)) {
                error_setg(errp, "remaining export info len %" PRIu32
                           " is unexpected size", len);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read64(ioc, &info->size, "info size", errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read16(ioc, &info->flags, "info flags", errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (info->min_block &&
                !QEMU_IS_ALIGNED(info->size, info->min_block)) {
                error_setg(errp, "export size %" PRIu64 " is not multiple of "
                           "minimum block size %" PRIu32, info->size,
                           info->min_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
            break;
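
        /*
         * NBD_INFO_BLOCK_SIZE payload: three consecutive 32-bit
         * big-endian values - minimum, preferred, and maximum block
         * size - hence the sizeof(info->min_block) * 3 length check
         * below.
         */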
        case NBD_INFO_BLOCK_SIZE:
            if (len != sizeof(info->min_block) * 3) {
                error_setg(errp, "remaining export info len %" PRIu32
                           " is unexpected size", len);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->min_block, "info minimum block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (!is_power_of_2(info->min_block)) {
                error_setg(errp, "server minimum block size %" PRIu32
                           " is not a power of two", info->min_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->opt_block, "info preferred block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (!is_power_of_2(info->opt_block) ||
                info->opt_block < info->min_block) {
                error_setg(errp, "server preferred block size %" PRIu32
                           " is not valid", info->opt_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->max_block, "info maximum block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (info->max_block < info->min_block) {
                error_setg(errp, "server maximum block size %" PRIu32
                           " is not valid", info->max_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_opt_info_block_size(info->min_block, info->opt_block,
                                          info->max_block);
            break;
        default:
            /*
             * Not worth the bother to check if NBD_INFO_NAME or
             * NBD_INFO_DESCRIPTION exceed NBD_MAX_STRING_SIZE.
             */
            trace_nbd_opt_info_unknown(type, nbd_info_lookup(type));
            if (nbd_drop(ioc, len, errp) < 0) {
                error_prepend(errp, "Failed to read info payload: ");
                nbd_send_opt_abort(ioc);
                return -1;
            }
            break;
        }
    }
}
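
/*
 * Illustrative caller sketch (an assumption for exposition, not code
 * from this file): with a QIOChannel that has completed the
 * fixed-newstyle handshake, a negotiation front end might drive this
 * helper as follows, honoring the -1/0/>0 contract documented above:
 *
 *     NBDExportInfo info = { .name = g_strdup("export"),
 *                            .request_sizes = true };
 *     int ret = nbd_opt_info_or_go(ioc, NBD_OPT_GO, &info, errp);
 *     if (ret < 0) {
 *         return -EINVAL;        // errp already set
 *     }
 *     if (ret == 0) {
 *         // no NBD_OPT_GO: try NBD_OPT_LIST for a nicer error, then
 *         // fall back to NBD_OPT_EXPORT_NAME
 *     }
 *     // ret > 0: info.size, info.flags (and block sizes, if
 *     // requested) are now populated
 */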

/*
 * Return -1 on failure, 0 if wantname is an available export.
 *
 * Only reached when the server lacks NBD_OPT_GO: LIST cannot make
 * NBD_OPT_EXPORT_NAME succeed, but it lets us emit a decent error
 * message before that option's all-or-nothing disconnect semantics.
 */
static int nbd_receive_query_exports(QIOChannel *ioc,
                                     const char *wantname,
                                     Error **errp)
{
    bool list_empty = true;
    bool found_export = false;

    trace_nbd_receive_query_exports_start(wantname);
    if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
        return -1;
    }

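    /*
     * For NBD_OPT_LIST the server replies with one NBD_REP_SERVER per
     * export followed by a terminating NBD_REP_ACK; accordingly,
     * nbd_receive_list() hands back a malloc'd name (1), end of list
     * (0), or error (-1).
     */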
|
|
|
|
|
|
|
|
while (1) {
|
nbd/client: Refactor nbd_receive_list()
Right now, nbd_receive_list() is only called by
nbd_receive_query_exports(), which in turn is only called if the
server lacks NBD_OPT_GO but has working option negotiation, and is
merely used as a quality-of-implementation trick since servers
can't give decent errors for NBD_OPT_EXPORT_NAME. However, servers
that lack NBD_OPT_GO are becoming increasingly rare (nbdkit was a
latecomer, in Aug 2018, but qemu has been such a server since commit
f37708f6 in July 2017 and released in 2.10), so it no longer makes
sense to micro-optimize that function for performance.
Furthermore, when debugging a server's implementation, tracing the
full reply (both names and descriptions) is useful, not to mention
that upcoming patches adding 'qemu-nbd --list' will want to collect
that data. And when you consider that a server can send an export
name up to the NBD protocol length limit of 4k; but our current
NBD_MAX_NAME_SIZE is only 256, we can't trace all valid server
names without more storage, but 4k is large enough that the heap
is better than the stack for long names.
Thus, I'm changing the division of labor, with nbd_receive_list()
now always malloc'ing a result on success (the malloc is bounded
by the fact that we reject servers with a reply length larger
than 32M), and moving the comparison to 'wantname' to the caller.
There is a minor change in behavior where a server with 0 exports
(an immediate NBD_REP_ACK reply) is now no longer distinguished
from a server without LIST support (NBD_REP_ERR_UNSUP); this
information could be preserved with a complication to the calling
contract to provide a bit more information, but I didn't see the
point. After all, the worst that can happen if our guess at a
match is wrong is that the caller will get a cryptic disconnect
when NBD_OPT_EXPORT_NAME fails (which is no different from what
would happen if we had not tried LIST), while treating an empty
list as immediate failure would prevent connecting to really old
servers that really did lack LIST. Besides, NBD servers with 0
exports are rare (qemu can do it when using QMP nbd-server-start
without nbd-server-add - but qemu understands NBD_OPT_GO and
thus won't tickle this change in behavior).
Fix the spelling of foundExport to match coding standards while
in the area.
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20190117193658.16413-9-eblake@redhat.com>
2019-01-17 20:36:45 +01:00
|
|
|
char *name;
|
|
|
|
int ret = nbd_receive_list(ioc, &name, NULL, errp);
|
2016-02-10 19:41:09 +01:00
|
|
|
|
|
|
|
if (ret < 0) {
|
2016-10-14 20:33:13 +02:00
|
|
|
/* Server gave unexpected reply */
|
2016-02-10 19:41:09 +01:00
|
|
|
return -1;
|
2016-10-14 20:33:13 +02:00
|
|
|
} else if (ret == 0) {
|
|
|
|
/* Done iterating. */
|
nbd/client: Refactor nbd_receive_list()
Right now, nbd_receive_list() is only called by
nbd_receive_query_exports(), which in turn is only called if the
server lacks NBD_OPT_GO but has working option negotiation, and is
merely used as a quality-of-implementation trick since servers
can't give decent errors for NBD_OPT_EXPORT_NAME. However, servers
that lack NBD_OPT_GO are becoming increasingly rare (nbdkit was a
latecomer, in Aug 2018, but qemu has been such a server since commit
f37708f6 in July 2017 and released in 2.10), so it no longer makes
sense to micro-optimize that function for performance.
Furthermore, when debugging a server's implementation, tracing the
full reply (both names and descriptions) is useful, not to mention
that upcoming patches adding 'qemu-nbd --list' will want to collect
that data. And when you consider that a server can send an export
name up to the NBD protocol length limit of 4k; but our current
NBD_MAX_NAME_SIZE is only 256, we can't trace all valid server
names without more storage, but 4k is large enough that the heap
is better than the stack for long names.
Thus, I'm changing the division of labor, with nbd_receive_list()
now always malloc'ing a result on success (the malloc is bounded
by the fact that we reject servers with a reply length larger
than 32M), and moving the comparison to 'wantname' to the caller.
There is a minor change in behavior where a server with 0 exports
(an immediate NBD_REP_ACK reply) is now no longer distinguished
from a server without LIST support (NBD_REP_ERR_UNSUP); this
information could be preserved with a complication to the calling
contract to provide a bit more information, but I didn't see the
point. After all, the worst that can happen if our guess at a
match is wrong is that the caller will get a cryptic disconnect
when NBD_OPT_EXPORT_NAME fails (which is no different from what
would happen if we had not tried LIST), while treating an empty
list as immediate failure would prevent connecting to really old
servers that really did lack LIST. Besides, NBD servers with 0
exports are rare (qemu can do it when using QMP nbd-server-start
without nbd-server-add - but qemu understands NBD_OPT_GO and
thus won't tickle this change in behavior).
Fix the spelling of foundExport to match coding standards while
in the area.
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20190117193658.16413-9-eblake@redhat.com>
2019-01-17 20:36:45 +01:00
|
|
|
if (list_empty) {
|
|
|
|
/*
|
|
|
|
* We don't have enough context to tell a server that
|
|
|
|
* sent an empty list apart from a server that does
|
|
|
|
* not support the list command; but as this function
|
|
|
|
* is just used to trigger a nicer error message
|
|
|
|
* before trying NBD_OPT_EXPORT_NAME, assume the
|
|
|
|
* export is available.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
} else if (!found_export) {
|
2016-10-14 20:33:13 +02:00
|
|
|
error_setg(errp, "No export with name '%s' available",
|
|
|
|
wantname);
|
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
return -1;
|
|
|
|
}
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_receive_query_exports_success(wantname);
|
2016-10-14 20:33:13 +02:00
|
|
|
return 0;
|
2016-02-10 19:41:09 +01:00
|
|
|
}
|
2019-01-17 20:36:45 +01:00
|
|
|
list_empty = false;
|
|
|
|
if (!strcmp(name, wantname)) {
|
|
|
|
found_export = true;
|
|
|
|
}
|
|
|
|
g_free(name);
|
2016-02-10 19:41:09 +01:00
|
|
|
}
|
|
|
|
}
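/*
 * Illustrative exchange for the loop above (a sketch, not part of the
 * original source; assumes a server exporting "a" and "b" while the
 * client wants "b"):
 *
 *     client: NBD_OPT_LIST
 *     server: NBD_REP_SERVER "a"
 *     server: NBD_REP_SERVER "b"
 *     server: NBD_REP_ACK
 *
 * Each NBD_REP_SERVER reply is consumed by one nbd_receive_list() call;
 * the loop only compares the returned name against @wantname.
 */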
|
|
|
|
|
2019-08-24 19:28:13 +02:00
|
|
|
/*
|
|
|
|
* nbd_request_simple_option: Send an option request, and parse the reply.
|
|
|
|
 * @strict: if true, only ERR_UNSUP produces 0 status; if false, all server errors do.
|
2017-10-27 12:40:34 +02:00
|
|
|
* return 1 for successful negotiation,
|
|
|
|
* 0 if operation is unsupported,
|
|
|
|
* -1 with errp set for any other error
|
|
|
|
*/
|
2019-08-24 19:28:13 +02:00
|
|
|
static int nbd_request_simple_option(QIOChannel *ioc, int opt, bool strict,
|
|
|
|
Error **errp)
|
2016-02-10 19:41:11 +01:00
|
|
|
{
|
2017-11-22 11:19:57 +01:00
|
|
|
NBDOptionReply reply;
|
2017-10-27 12:40:34 +02:00
|
|
|
int error;
|
2016-02-10 19:41:11 +01:00
|
|
|
|
2017-10-27 12:40:34 +02:00
|
|
|
if (nbd_send_option_request(ioc, opt, 0, NULL, errp) < 0) {
|
|
|
|
return -1;
|
2016-02-10 19:41:11 +01:00
|
|
|
}
|
|
|
|
|
2017-10-27 12:40:34 +02:00
|
|
|
if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
2019-08-24 19:28:13 +02:00
|
|
|
error = nbd_handle_reply_err(ioc, &reply, strict, errp);
|
2017-10-27 12:40:34 +02:00
|
|
|
if (error <= 0) {
|
|
|
|
return error;
|
2016-02-10 19:41:11 +01:00
|
|
|
}
|
2016-10-14 20:33:10 +02:00
|
|
|
|
|
|
|
if (reply.type != NBD_REP_ACK) {
|
2017-10-27 12:40:34 +02:00
|
|
|
error_setg(errp, "Server answered option %d (%s) with unexpected "
|
2018-02-15 14:51:43 +01:00
|
|
|
"reply %" PRIu32 " (%s)", opt, nbd_opt_lookup(opt),
|
2017-10-27 12:40:34 +02:00
|
|
|
reply.type, nbd_rep_lookup(reply.type));
|
2016-10-14 20:33:11 +02:00
|
|
|
nbd_send_opt_abort(ioc);
|
2017-10-27 12:40:34 +02:00
|
|
|
return -1;
|
2016-02-10 19:41:11 +01:00
|
|
|
}
|
|
|
|
|
2016-10-14 20:33:10 +02:00
|
|
|
if (reply.length != 0) {
|
2017-10-27 12:40:34 +02:00
|
|
|
error_setg(errp, "Option %d ('%s') response length is %" PRIu32
|
|
|
|
" (it should be zero)", opt, nbd_opt_lookup(opt),
|
2016-10-14 20:33:10 +02:00
|
|
|
reply.length);
|
2016-10-14 20:33:11 +02:00
|
|
|
nbd_send_opt_abort(ioc);
|
2017-10-27 12:40:34 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
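/*
 * Caller-side sketch of the tri-state contract above (illustrative,
 * not part of the original source):
 *
 *     int r = nbd_request_simple_option(ioc, NBD_OPT_STRUCTURED_REPLY,
 *                                       false, errp);
 *     if (r < 0) {
 *         return -EINVAL;     // hard failure, errp already set
 *     } else if (r == 0) {
 *         // option unsupported: fall back to simple replies
 *     } else {
 *         // option successfully negotiated
 *     }
 */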
|
|
|
|
|
|
|
|
static QIOChannel *nbd_receive_starttls(QIOChannel *ioc,
|
|
|
|
QCryptoTLSCreds *tlscreds,
|
|
|
|
const char *hostname, Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
QIOChannelTLS *tioc;
|
|
|
|
struct NBDTLSHandshakeData data = { 0 };
|
|
|
|
|
2019-08-24 19:28:13 +02:00
|
|
|
ret = nbd_request_simple_option(ioc, NBD_OPT_STARTTLS, true, errp);
|
2017-10-27 12:40:34 +02:00
|
|
|
if (ret <= 0) {
|
|
|
|
if (ret == 0) {
|
|
|
|
error_setg(errp, "Server don't support STARTTLS option");
|
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
}
|
2016-02-10 19:41:11 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_receive_starttls_new_client();
|
2016-02-10 19:41:11 +01:00
|
|
|
tioc = qio_channel_tls_new_client(ioc, tlscreds, hostname, errp);
|
|
|
|
if (!tioc) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-09-30 12:57:14 +02:00
|
|
|
qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-client-tls");
|
2016-02-10 19:41:11 +01:00
|
|
|
data.loop = g_main_loop_new(g_main_context_default(), FALSE);
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_receive_starttls_tls_handshake();
|
2016-02-10 19:41:11 +01:00
|
|
|
qio_channel_tls_handshake(tioc,
|
|
|
|
nbd_tls_handshake,
|
|
|
|
&data,
|
2018-03-05 07:43:24 +01:00
|
|
|
NULL,
|
2016-02-10 19:41:11 +01:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
if (!data.complete) {
|
|
|
|
g_main_loop_run(data.loop);
|
|
|
|
}
|
|
|
|
g_main_loop_unref(data.loop);
|
|
|
|
if (data.error) {
|
|
|
|
error_propagate(errp, data.error);
|
|
|
|
object_unref(OBJECT(tioc));
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return QIO_CHANNEL(tioc);
|
|
|
|
}
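/*
 * Minimal usage sketch for the helper above (illustrative; assumes
 * @creds was created elsewhere as a QCryptoTLSCreds client object):
 *
 *     QIOChannel *tioc = nbd_receive_starttls(ioc, creds, hostname, errp);
 *     if (!tioc) {
 *         return -EINVAL;     // errp already set
 *     }
 *     ioc = tioc;             // all further negotiation runs over TLS
 */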
|
|
|
|
|
2019-01-17 20:36:48 +01:00
|
|
|
/*
|
|
|
|
* nbd_send_meta_query:
|
|
|
|
* Send 0 or 1 set/list meta context queries.
|
|
|
|
* Return 0 on success, -1 with errp set for any error
|
|
|
|
*/
|
|
|
|
static int nbd_send_meta_query(QIOChannel *ioc, uint32_t opt,
|
|
|
|
const char *export, const char *query,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
2023-06-08 15:56:30 +02:00
|
|
|
uint32_t export_len;
|
2019-01-17 20:36:48 +01:00
|
|
|
uint32_t queries = !!query;
|
|
|
|
uint32_t query_len = 0;
|
|
|
|
uint32_t data_len;
|
|
|
|
char *data;
|
|
|
|
char *p;
|
|
|
|
|
2023-06-08 15:56:30 +02:00
|
|
|
assert(strnlen(export, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
|
|
|
|
export_len = strlen(export);
|
2019-01-17 20:36:48 +01:00
|
|
|
data_len = sizeof(export_len) + export_len + sizeof(queries);
|
|
|
|
if (query) {
|
2023-06-08 15:56:30 +02:00
|
|
|
assert(strnlen(query, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
|
2019-01-17 20:36:48 +01:00
|
|
|
query_len = strlen(query);
|
|
|
|
data_len += sizeof(query_len) + query_len;
|
|
|
|
} else {
|
|
|
|
assert(opt == NBD_OPT_LIST_META_CONTEXT);
|
|
|
|
}
|
|
|
|
p = data = g_malloc(data_len);
|
|
|
|
|
|
|
|
trace_nbd_opt_meta_request(nbd_opt_lookup(opt), query ?: "(all)", export);
|
|
|
|
stl_be_p(p, export_len);
|
|
|
|
memcpy(p += sizeof(export_len), export, export_len);
|
|
|
|
stl_be_p(p += export_len, queries);
|
|
|
|
if (query) {
|
|
|
|
stl_be_p(p += sizeof(queries), query_len);
|
|
|
|
memcpy(p += sizeof(query_len), query, query_len);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = nbd_send_option_request(ioc, opt, data_len, data, errp);
|
|
|
|
g_free(data);
|
|
|
|
return ret;
|
|
|
|
}
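/*
 * Wire layout assembled above (summary; all integers big-endian per
 * the NBD spec):
 *
 *     32-bit export name length
 *     export name bytes
 *     32-bit query count (0 for a bare LIST, 1 when @query is given)
 *     per query: 32-bit query length, then query bytes
 */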
|
|
|
|
|
2019-01-17 20:36:49 +01:00
|
|
|
/*
|
|
|
|
* nbd_receive_one_meta_context:
|
|
|
|
* Called in a loop to receive and trace one set/list meta context reply.
|
|
|
|
* Pass non-NULL @name or @id to collect results back to the caller, which
|
|
|
|
* must eventually call g_free().
|
|
|
|
* return 1 if name is set and iteration must continue,
|
|
|
|
* 0 if iteration is complete (including if option is unsupported),
|
|
|
|
* -1 with errp set for any error
|
|
|
|
*/
|
|
|
|
static int nbd_receive_one_meta_context(QIOChannel *ioc,
|
|
|
|
uint32_t opt,
|
|
|
|
char **name,
|
|
|
|
uint32_t *id,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
NBDOptionReply reply;
|
|
|
|
char *local_name = NULL;
|
|
|
|
uint32_t local_id;
|
|
|
|
|
|
|
|
if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-08-24 19:28:13 +02:00
|
|
|
ret = nbd_handle_reply_err(ioc, &reply, false, errp);
|
2019-01-17 20:36:49 +01:00
|
|
|
if (ret <= 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (reply.type == NBD_REP_ACK) {
|
|
|
|
if (reply.length != 0) {
|
|
|
|
error_setg(errp, "Unexpected length to ACK response");
|
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
} else if (reply.type != NBD_REP_META_CONTEXT) {
|
|
|
|
error_setg(errp, "Unexpected reply type %u (%s), expected %u (%s)",
|
|
|
|
reply.type, nbd_rep_lookup(reply.type),
|
|
|
|
NBD_REP_META_CONTEXT, nbd_rep_lookup(NBD_REP_META_CONTEXT));
|
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (reply.length <= sizeof(local_id) ||
|
|
|
|
reply.length > NBD_MAX_BUFFER_SIZE) {
|
|
|
|
error_setg(errp, "Failed to negotiate meta context, server "
|
|
|
|
"answered with unexpected length %" PRIu32,
|
|
|
|
reply.length);
|
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read32(ioc, &local_id, "context id", errp) < 0) {
|
2019-01-17 20:36:49 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
reply.length -= sizeof(local_id);
|
|
|
|
local_name = g_malloc(reply.length + 1);
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read(ioc, local_name, reply.length, "context name", errp) < 0) {
|
2019-01-17 20:36:49 +01:00
|
|
|
g_free(local_name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
local_name[reply.length] = '\0';
|
|
|
|
trace_nbd_opt_meta_reply(nbd_opt_lookup(opt), local_name, local_id);
|
|
|
|
|
|
|
|
if (name) {
|
|
|
|
*name = local_name;
|
|
|
|
} else {
|
|
|
|
g_free(local_name);
|
|
|
|
}
|
|
|
|
if (id) {
|
|
|
|
*id = local_id;
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
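/*
 * Payload parsed above for an NBD_REP_META_CONTEXT reply (summary):
 *
 *     32-bit context id (big-endian)
 *     context name, reply.length - 4 bytes, not NUL-terminated on the wire
 *
 * The name is copied into a freshly allocated NUL-terminated buffer so
 * that callers can treat it as an ordinary C string.
 */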
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nbd_negotiate_simple_meta_context:
|
2019-01-17 20:36:47 +01:00
|
|
|
* Request the server to set the meta context for export @info->name
|
|
|
|
* using @info->x_dirty_bitmap with a fallback to "base:allocation",
|
|
|
|
* setting @info->context_id to the resulting id. Fail if the server
|
|
|
|
* responds with more than one context or with a context different
|
|
|
|
* than the query.
|
|
|
|
* return 1 for successful negotiation,
|
2018-03-12 16:21:23 +01:00
|
|
|
* 0 if operation is unsupported,
|
|
|
|
* -1 with errp set for any other error
|
|
|
|
*/
|
|
|
|
static int nbd_negotiate_simple_meta_context(QIOChannel *ioc,
|
2019-01-17 20:36:47 +01:00
|
|
|
NBDExportInfo *info,
|
2018-03-12 16:21:23 +01:00
|
|
|
Error **errp)
|
|
|
|
{
|
2019-01-17 20:36:47 +01:00
|
|
|
/*
|
|
|
|
* TODO: Removing the x_dirty_bitmap hack will mean refactoring
|
|
|
|
* this function to request and store ids for multiple contexts
|
|
|
|
* (both base:allocation and a dirty bitmap), at which point this
|
|
|
|
* function should lose the term _simple.
|
|
|
|
*/
|
2018-03-12 16:21:23 +01:00
|
|
|
int ret;
|
2019-01-17 20:36:47 +01:00
|
|
|
const char *context = info->x_dirty_bitmap ?: "base:allocation";
|
2018-04-27 16:20:01 +02:00
|
|
|
bool received = false;
|
2019-01-17 20:36:49 +01:00
|
|
|
char *name = NULL;
|
2018-03-12 16:21:23 +01:00
|
|
|
|
2019-01-17 20:36:48 +01:00
|
|
|
if (nbd_send_meta_query(ioc, NBD_OPT_SET_META_CONTEXT,
|
|
|
|
info->name, context, errp) < 0) {
|
|
|
|
return -1;
|
2018-03-12 16:21:23 +01:00
|
|
|
}
|
|
|
|
|
2019-01-17 20:36:49 +01:00
|
|
|
ret = nbd_receive_one_meta_context(ioc, NBD_OPT_SET_META_CONTEXT,
|
|
|
|
&name, &info->context_id, errp);
|
|
|
|
if (ret < 0) {
|
2018-03-12 16:21:23 +01:00
|
|
|
return -1;
|
|
|
|
}
|
2019-01-17 20:36:49 +01:00
|
|
|
if (ret == 1) {
|
2018-03-12 16:21:23 +01:00
|
|
|
if (strcmp(context, name)) {
|
|
|
|
error_setg(errp, "Failed to negotiate meta context '%s', server "
|
|
|
|
"answered with different context '%s'", context,
|
|
|
|
name);
|
|
|
|
g_free(name);
|
2018-03-30 01:18:37 +02:00
|
|
|
nbd_send_opt_abort(ioc);
|
2018-03-12 16:21:23 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
g_free(name);
|
|
|
|
received = true;
|
|
|
|
|
2019-01-17 20:36:49 +01:00
|
|
|
ret = nbd_receive_one_meta_context(ioc, NBD_OPT_SET_META_CONTEXT,
|
|
|
|
NULL, NULL, errp);
|
|
|
|
if (ret < 0) {
|
2018-03-12 16:21:23 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
2019-01-17 20:36:49 +01:00
|
|
|
if (ret != 0) {
|
|
|
|
error_setg(errp, "Server answered with more than one context");
|
2018-03-30 01:18:37 +02:00
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
return -1;
|
|
|
|
}
|
2019-01-17 20:36:47 +01:00
|
|
|
return received;
|
2018-03-12 16:21:23 +01:00
|
|
|
}
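/*
 * Caller-side sketch (illustrative): the tri-state result mirrors
 * nbd_request_simple_option(), and nbd_receive_negotiate() below
 * consumes it roughly as:
 *
 *     int r = nbd_negotiate_simple_meta_context(ioc, info, errp);
 *     if (r < 0) {
 *         return -EINVAL;                  // errp already set
 *     }
 *     info->base_allocation = (r == 1);    // 0: contexts unsupported
 */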
|
2016-02-10 19:41:11 +01:00
|
|
|
|
2019-01-17 20:36:55 +01:00
|
|
|
/*
|
|
|
|
* nbd_list_meta_contexts:
|
|
|
|
* Request the server to list all meta contexts for export @info->name.
|
|
|
|
* return 0 if list is complete (even if empty),
|
|
|
|
* -1 with errp set for any error
|
|
|
|
*/
|
|
|
|
static int nbd_list_meta_contexts(QIOChannel *ioc,
|
|
|
|
NBDExportInfo *info,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
2019-01-17 20:36:57 +01:00
|
|
|
bool seen_any = false;
|
|
|
|
bool seen_qemu = false;
|
2019-01-17 20:36:55 +01:00
|
|
|
|
|
|
|
if (nbd_send_meta_query(ioc, NBD_OPT_LIST_META_CONTEXT,
|
|
|
|
info->name, NULL, errp) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
char *context;
|
|
|
|
|
|
|
|
ret = nbd_receive_one_meta_context(ioc, NBD_OPT_LIST_META_CONTEXT,
|
|
|
|
&context, NULL, errp);
|
2019-01-17 20:36:57 +01:00
|
|
|
if (ret == 0 && seen_any && !seen_qemu) {
|
|
|
|
/*
|
|
|
|
* Work around qemu 3.0 bug: the server forgot to send
|
|
|
|
* "qemu:" replies to 0 queries. If we saw at least one
|
|
|
|
* reply (probably base:allocation), but none of them were
|
|
|
|
* qemu:, then run a more specific query to make sure.
|
|
|
|
*/
|
|
|
|
seen_qemu = true;
|
|
|
|
if (nbd_send_meta_query(ioc, NBD_OPT_LIST_META_CONTEXT,
|
|
|
|
info->name, "qemu:", errp) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2019-01-17 20:36:55 +01:00
|
|
|
if (ret <= 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2019-01-17 20:36:57 +01:00
|
|
|
seen_any = true;
|
|
|
|
seen_qemu |= strstart(context, "qemu:", NULL);
|
2019-01-17 20:36:55 +01:00
|
|
|
info->contexts = g_renew(char *, info->contexts, ++info->n_contexts);
|
|
|
|
info->contexts[info->n_contexts - 1] = context;
|
|
|
|
}
|
|
|
|
}
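/*
 * Illustrative trace of the qemu 3.0 workaround above (assumes a buggy
 * server with one dirty bitmap; names are hypothetical):
 *
 *     client: NBD_OPT_LIST_META_CONTEXT export="e" queries=0
 *     server: NBD_REP_META_CONTEXT "base:allocation"
 *     server: NBD_REP_ACK                        (qemu:* wrongly omitted)
 *     client: NBD_OPT_LIST_META_CONTEXT export="e" queries=1 "qemu:"
 *     server: NBD_REP_META_CONTEXT "qemu:dirty-bitmap:b0"
 *     server: NBD_REP_ACK
 */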
|
|
|
|
|
2019-01-17 20:36:51 +01:00
|
|
|
/*
|
|
|
|
* nbd_start_negotiate:
|
|
|
|
* Start the handshake to the server. After a positive return, the server
|
|
|
|
* is ready to accept additional NBD_OPT requests.
|
|
|
|
* Returns: negative errno: failure talking to server
|
2023-06-08 15:56:37 +02:00
|
|
|
* non-negative: enum NBDMode describing server abilities
|
2019-01-17 20:36:51 +01:00
|
|
|
*/
|
2023-08-31 00:48:00 +02:00
|
|
|
static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
2019-01-17 20:36:51 +01:00
|
|
|
const char *hostname, QIOChannel **outioc,
|
2023-08-29 19:58:28 +02:00
|
|
|
NBDMode max_mode, bool *zeroes,
|
2019-01-17 20:36:51 +01:00
|
|
|
Error **errp)
|
2016-01-14 09:41:02 +01:00
|
|
|
{
|
nbd: Use ERRP_GUARD()
If we want to check error after errp-function call, we need to
introduce local_err and then propagate it to errp. Instead, use
the ERRP_GUARD() macro, benefits are:
1. No need of explicit error_propagate call
2. No need of explicit local_err variable: use errp directly
3. ERRP_GUARD() leaves errp as is if it's not NULL or
&error_fatal; this means that we don't break error_abort
(we'll abort on error_set, not on error_propagate)
If we want to add some info to errp (by error_prepend() or
error_append_hint()), we must use the ERRP_GUARD() macro.
Otherwise, this info will not be added when errp == &error_fatal
(the program will exit prior to the error_append_hint() or
error_prepend() call). Fix several such cases, e.g. in nbd_read().
This commit is generated by command
sed -n '/^Network Block Device (NBD)$/,/^$/{s/^F: //p}' \
MAINTAINERS | \
xargs git ls-files | grep '\.[hc]$' | \
xargs spatch \
--sp-file scripts/coccinelle/errp-guard.cocci \
--macro-file scripts/cocci-macro-file.h \
--in-place --no-show-diff --max-width 80
Reported-by: Kevin Wolf <kwolf@redhat.com>
Reported-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
[Commit message tweaked]
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20200707165037.1026246-8-armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[ERRP_AUTO_PROPAGATE() renamed to ERRP_GUARD(), and
auto-propagated-errp.cocci to errp-guard.cocci. Commit message
tweaked again.]
2020-07-07 18:50:36 +02:00
|
|
|
ERRP_GUARD();
|
2017-07-07 22:30:41 +02:00
|
|
|
uint64_t magic;
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2019-01-17 20:36:51 +01:00
|
|
|
trace_nbd_start_negotiate(tlscreds, hostname ? hostname : "<null>");
|
2016-01-14 09:41:02 +01:00
|
|
|
|
nbd/client: Add nbd_receive_export_list()
We want to be able to detect whether a given qemu NBD server is
exposing the right export(s) and dirty bitmaps, at least for
regression testing. We could use 'nbd-client -l' from the upstream
NBD project to list exports, but it's annoying to rely on
out-of-tree binaries; furthermore, nbd-client doesn't necessarily
know about all of the qemu NBD extensions. Thus, we plan on adding
a new mode to qemu-nbd that merely sniffs all possible information
from the server during handshake phase, then disconnects and dumps
the information.
This patch adds the low-level client code for grabbing the list
of exports. It benefits from the recent refactoring patches, in
order to share as much code as possible when it comes to doing
validation of server replies. The resulting information is stored
in an array of NBDExportInfo which has been expanded to any
description string, along with a convenience function for freeing
the list.
Note: a malicious server could exhaust memory of a client by feeding
an unending loop of exports; perhaps we should place a limit on how
many we are willing to receive. But note that a server could
reasonably be serving an export for every file in a large directory,
where an arbitrary limit in the client means we can't list anything
from such a server; the same happens if we just run until the client
fails to malloc() and thus dies by an abort(), where the limit is
no longer arbitrary but determined by available memory. Since the
client is already planning on being short-lived, it's hard to call
this a denial of service attack that would starve off other uses,
so it does not appear to be a security issue.
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Message-Id: <20190117193658.16413-18-eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2019-01-17 20:36:54 +01:00
|
|
|
if (zeroes) {
|
|
|
|
*zeroes = true;
|
|
|
|
}
|
2016-02-10 19:41:11 +01:00
|
|
|
if (outioc) {
|
|
|
|
*outioc = NULL;
|
|
|
|
}
|
|
|
|
if (tlscreds && !outioc) {
|
|
|
|
error_setg(errp, "Output I/O channel required for TLS");
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-02-10 19:41:11 +01:00
|
|
|
}
|
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read64(ioc, &magic, "initial magic", errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_receive_negotiate_magic(magic);
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2018-12-15 14:53:10 +01:00
|
|
|
if (magic != NBD_INIT_MAGIC) {
|
|
|
|
error_setg(errp, "Bad initial magic received: 0x%" PRIx64, magic);
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read64(ioc, &magic, "server magic", errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_receive_negotiate_magic(magic);
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2016-02-10 19:41:05 +01:00
|
|
|
if (magic == NBD_OPTS_MAGIC) {
|
2016-02-10 19:41:07 +01:00
|
|
|
uint32_t clientflags = 0;
|
|
|
|
uint16_t globalflags;
|
2016-02-10 19:41:09 +01:00
|
|
|
bool fixedNewStyle = false;
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read16(ioc, &globalflags, "server flags", errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_receive_negotiate_server_flags(globalflags);
|
2016-02-10 19:41:07 +01:00
|
|
|
if (globalflags & NBD_FLAG_FIXED_NEWSTYLE) {
|
2016-02-10 19:41:09 +01:00
|
|
|
fixedNewStyle = true;
|
2016-02-10 19:41:07 +01:00
|
|
|
clientflags |= NBD_FLAG_C_FIXED_NEWSTYLE;
|
|
|
|
}
|
2016-10-14 20:33:14 +02:00
|
|
|
if (globalflags & NBD_FLAG_NO_ZEROES) {
|
2019-01-17 20:36:54 +01:00
|
|
|
if (zeroes) {
|
|
|
|
*zeroes = false;
|
|
|
|
}
|
2016-10-14 20:33:14 +02:00
|
|
|
clientflags |= NBD_FLAG_C_NO_ZEROES;
|
|
|
|
}
|
2016-02-10 19:41:07 +01:00
|
|
|
/* client requested flags */
|
2016-02-10 19:41:09 +01:00
|
|
|
clientflags = cpu_to_be32(clientflags);
|
2017-06-02 17:01:39 +02:00
|
|
|
if (nbd_write(ioc, &clientflags, sizeof(clientflags), errp) < 0) {
|
2017-11-13 16:24:24 +01:00
|
|
|
error_prepend(errp, "Failed to send clientflags field: ");
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2016-02-10 19:41:11 +01:00
|
|
|
if (tlscreds) {
|
|
|
|
if (fixedNewStyle) {
|
|
|
|
*outioc = nbd_receive_starttls(ioc, tlscreds, hostname, errp);
|
|
|
|
if (!*outioc) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-02-10 19:41:11 +01:00
|
|
|
}
|
|
|
|
ioc = *outioc;
|
|
|
|
} else {
|
|
|
|
error_setg(errp, "Server does not support STARTTLS");
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-02-10 19:41:11 +01:00
|
|
|
}
|
|
|
|
}
|
2016-02-10 19:41:09 +01:00
|
|
|
if (fixedNewStyle) {
|
2019-01-17 20:36:51 +01:00
|
|
|
int result = 0;
|
nbd: Implement NBD_OPT_GO on client
NBD_OPT_EXPORT_NAME is lousy: per the NBD protocol, any failure
requires the server to close the connection rather than report an
error to us. Therefore, upstream NBD recently added NBD_OPT_GO as
the improved version of the option that does what we want [1]: it
reports sane errors on failures, and on success provides at least
as much info as NBD_OPT_EXPORT_NAME.
[1] https://github.com/NetworkBlockDevice/nbd/blob/extension-info/doc/proto.md
This is a first cut at use of the information types. Note that we
do not need to use NBD_OPT_INFO, and that use of NBD_OPT_GO means
we no longer have to use NBD_OPT_LIST to learn whether a server
requires TLS (this requires servers that gracefully handle unknown
NBD_OPT, many servers prior to qemu 2.5 were buggy, but I have patched
qemu, upstream nbd, and nbdkit in the meantime, in part because of
interoperability testing with this patch). We still fall back to
NBD_OPT_LIST when NBD_OPT_GO is not supported on the server, as it
is still one last chance for a nicer error message. Later patches
will use further info, like NBD_INFO_BLOCK_SIZE.
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20170707203049.534-8-eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-07-07 22:30:47 +02:00
|
|
|
|
2023-09-25 21:22:39 +02:00
|
|
|
if (max_mode >= NBD_MODE_EXTENDED) {
|
|
|
|
result = nbd_request_simple_option(ioc,
|
|
|
|
NBD_OPT_EXTENDED_HEADERS,
|
|
|
|
false, errp);
|
|
|
|
if (result) {
|
|
|
|
return result < 0 ? -EINVAL : NBD_MODE_EXTENDED;
|
|
|
|
}
|
|
|
|
}
|
2023-08-29 19:58:28 +02:00
|
|
|
if (max_mode >= NBD_MODE_STRUCTURED) {
|
2017-10-27 12:40:37 +02:00
|
|
|
result = nbd_request_simple_option(ioc,
|
|
|
|
NBD_OPT_STRUCTURED_REPLY,
|
2019-08-24 19:28:13 +02:00
|
|
|
false, errp);
|
2023-09-25 21:22:39 +02:00
|
|
|
if (result) {
|
|
|
|
return result < 0 ? -EINVAL : NBD_MODE_STRUCTURED;
|
2017-10-27 12:40:37 +02:00
|
|
|
}
|
|
|
|
}
|
2023-09-25 21:22:39 +02:00
|
|
|
return NBD_MODE_SIMPLE;
|
2019-01-17 20:36:51 +01:00
|
|
|
} else {
|
2023-06-08 15:56:37 +02:00
|
|
|
return NBD_MODE_EXPORT_NAME;
|
2019-01-17 20:36:51 +01:00
|
|
|
}
|
|
|
|
} else if (magic == NBD_CLIENT_MAGIC) {
|
|
|
|
if (tlscreds) {
|
|
|
|
error_setg(errp, "Server does not support STARTTLS");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2023-06-08 15:56:37 +02:00
|
|
|
return NBD_MODE_OLDSTYLE;
|
2019-01-17 20:36:51 +01:00
|
|
|
} else {
|
|
|
|
error_setg(errp, "Bad server magic received: 0x%" PRIx64, magic);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
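/*
 * Handshake summary for the branches above (constants are the ones
 * defined by the NBD spec): the server first sends NBD_INIT_MAGIC
 * ("NBDMAGIC"), then either NBD_OPTS_MAGIC ("IHAVEOPT") for newstyle
 * negotiation or NBD_CLIENT_MAGIC for oldstyle. Only newstyle servers
 * can process NBD_OPT requests such as STARTTLS, STRUCTURED_REPLY and
 * EXTENDED_HEADERS.
 */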
|
2017-10-27 12:40:37 +02:00
|
|
|
|
2019-01-17 20:36:52 +01:00
|
|
|
/*
|
|
|
|
* nbd_negotiate_finish_oldstyle:
|
|
|
|
* Populate @info with the size and export flags from an oldstyle server,
|
|
|
|
 * but do not consume the 124 bytes of reserved zero padding.
|
|
|
|
* Returns 0 on success, -1 with @errp set on failure
|
|
|
|
*/
|
|
|
|
static int nbd_negotiate_finish_oldstyle(QIOChannel *ioc, NBDExportInfo *info,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
uint32_t oldflags;
|
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read64(ioc, &info->size, "export length", errp) < 0) {
|
2019-01-17 20:36:52 +01:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read32(ioc, &oldflags, "export flags", errp) < 0) {
|
2019-01-17 20:36:52 +01:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (oldflags & ~0xffff) {
|
|
|
|
error_setg(errp, "Unexpected export flags %0x" PRIx32, oldflags);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
info->flags = oldflags;
|
|
|
|
return 0;
|
|
|
|
}
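/*
 * Oldstyle header parsed above (summary): a 64-bit export size, a
 * 32-bit export flags word, then 124 reserved zero bytes. The padding
 * is deliberately left unread here; nbd_receive_negotiate() drops it,
 * since oldstyle servers never negotiate NBD_FLAG_C_NO_ZEROES.
 */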
|
|
|
|
|
2019-01-17 20:36:51 +01:00
|
|
|
/*
|
|
|
|
* nbd_receive_negotiate:
|
|
|
|
* Connect to server, complete negotiation, and move into transmission phase.
|
|
|
|
* Returns: negative errno: failure talking to server
|
|
|
|
* 0: server is connected
|
|
|
|
*/
|
2023-08-31 00:47:59 +02:00
|
|
|
int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
2019-01-17 20:36:51 +01:00
|
|
|
const char *hostname, QIOChannel **outioc,
|
|
|
|
NBDExportInfo *info, Error **errp)
|
|
|
|
{
|
2020-07-07 18:50:36 +02:00
|
|
|
ERRP_GUARD();
|
2019-01-17 20:36:51 +01:00
|
|
|
int result;
|
|
|
|
bool zeroes;
|
|
|
|
bool base_allocation = info->base_allocation;
|
2018-03-12 16:21:23 +01:00
|
|
|
|
2019-11-14 03:46:34 +01:00
|
|
|
assert(info->name && strlen(info->name) <= NBD_MAX_STRING_SIZE);
|
2019-01-17 20:36:51 +01:00
|
|
|
trace_nbd_receive_negotiate_name(info->name);
|
|
|
|
|
2023-08-31 00:48:00 +02:00
|
|
|
result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
|
2023-08-29 19:58:28 +02:00
|
|
|
info->mode, &zeroes, errp);
|
2023-06-08 15:56:37 +02:00
|
|
|
if (result < 0) {
|
|
|
|
return result;
|
|
|
|
}
|
2019-01-17 20:36:51 +01:00
|
|
|
|
2023-08-29 19:58:28 +02:00
|
|
|
info->mode = result;
|
2019-01-17 20:36:51 +01:00
|
|
|
info->base_allocation = false;
|
|
|
|
if (tlscreds && *outioc) {
|
|
|
|
ioc = *outioc;
|
|
|
|
}
|
|
|
|
|
2023-08-29 19:58:28 +02:00
|
|
|
switch (info->mode) {
|
2023-09-25 21:22:39 +02:00
|
|
|
case NBD_MODE_EXTENDED:
|
2023-06-08 15:56:37 +02:00
|
|
|
case NBD_MODE_STRUCTURED:
|
2019-01-17 20:36:51 +01:00
|
|
|
if (base_allocation) {
|
|
|
|
result = nbd_negotiate_simple_meta_context(ioc, info, errp);
|
2017-07-07 22:30:47 +02:00
|
|
|
if (result < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2017-07-07 22:30:47 +02:00
|
|
|
}
|
2019-01-17 20:36:51 +01:00
|
|
|
info->base_allocation = result == 1;
|
|
|
|
}
|
|
|
|
/* fall through */
|
2023-06-08 15:56:37 +02:00
|
|
|
case NBD_MODE_SIMPLE:
|
2019-01-17 20:36:51 +01:00
|
|
|
/* Try NBD_OPT_GO first - if it works, we are done (it
|
|
|
|
* also gives us a good message if the server requires
|
|
|
|
* TLS). If it is not available, fall back to
|
|
|
|
* NBD_OPT_LIST for nicer error messages about a missing
|
|
|
|
* export, then use NBD_OPT_EXPORT_NAME. */
|
2019-01-17 20:36:53 +01:00
|
|
|
result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
|
2019-01-17 20:36:51 +01:00
|
|
|
if (result < 0) {
|
|
|
|
return -EINVAL;
|
2016-02-10 19:41:09 +01:00
|
|
|
}
|
2019-01-17 20:36:51 +01:00
|
|
|
if (result > 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
/* Check our desired export is present in the
|
|
|
|
* server export list. Since NBD_OPT_EXPORT_NAME
|
|
|
|
* cannot return an error message, running this
|
|
|
|
* query gives us better error reporting if the
|
|
|
|
* export name is not available.
|
|
|
|
*/
|
|
|
|
if (nbd_receive_query_exports(ioc, info->name, errp) < 0) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
/* fall through */
|
2023-06-08 15:56:37 +02:00
|
|
|
case NBD_MODE_EXPORT_NAME:
|
2016-10-14 20:33:10 +02:00
|
|
|
/* write the export name request */
|
2019-01-17 20:36:46 +01:00
|
|
|
if (nbd_send_option_request(ioc, NBD_OPT_EXPORT_NAME, -1, info->name,
|
2016-10-14 20:33:10 +02:00
|
|
|
errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2016-02-10 19:41:05 +01:00
|
|
|
|
2016-10-14 20:33:10 +02:00
|
|
|
/* Read the response */
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read64(ioc, &info->size, "export length", errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
|
|
|
|
2019-01-28 17:58:30 +01:00
|
|
|
if (nbd_read16(ioc, &info->flags, "export flags", errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-02-10 19:41:05 +01:00
|
|
|
}
|
2019-01-17 20:36:51 +01:00
|
|
|
break;
|
2023-06-08 15:56:37 +02:00
|
|
|
case NBD_MODE_OLDSTYLE:
|
2019-01-17 20:36:46 +01:00
|
|
|
if (*info->name) {
|
|
|
|
error_setg(errp, "Server does not support non-empty export names");
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-02-10 19:41:05 +01:00
|
|
|
}
|
2019-01-17 20:36:52 +01:00
|
|
|
if (nbd_negotiate_finish_oldstyle(ioc, info, errp) < 0) {
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-07-21 21:34:46 +02:00
|
|
|
}
|
2019-01-17 20:36:51 +01:00
|
|
|
break;
|
|
|
|
default:
|
2023-06-08 15:56:37 +02:00
|
|
|
g_assert_not_reached();
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2016-02-10 19:41:05 +01:00
|
|
|
|
2017-07-07 22:30:41 +02:00
|
|
|
trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
|
2017-06-02 17:01:39 +02:00
|
|
|
if (zeroes && nbd_drop(ioc, 124, errp) < 0) {
|
2017-11-13 16:24:24 +01:00
|
|
|
error_prepend(errp, "Failed to read reserved block: ");
|
2019-01-17 20:36:50 +01:00
|
|
|
return -EINVAL;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
|
2019-01-17 20:36:50 +01:00
|
|
|
return 0;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
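/*
 * Illustrative call (not part of the original source; the export name
 * and mode are assumptions):
 *
 *     NBDExportInfo info = { .name = g_strdup("export0"),
 *                            .mode = NBD_MODE_EXTENDED };
 *     if (nbd_receive_negotiate(ioc, NULL, NULL, NULL, &info, errp) < 0) {
 *         // errp describes the failure
 *     }
 *     // on success, info.size, info.flags and info.mode are populated
 */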
|
|
|
|
|
2019-01-17 20:36:54 +01:00
|
|
|
/* Clean up result of nbd_receive_export_list */
|
|
|
|
void nbd_free_export_list(NBDExportInfo *info, int count)
|
|
|
|
{
|
2019-01-17 20:36:55 +01:00
|
|
|
int i, j;
|
2019-01-17 20:36:54 +01:00
|
|
|
|
|
|
|
if (!info) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
g_free(info[i].name);
|
|
|
|
g_free(info[i].description);
|
2019-01-17 20:36:55 +01:00
|
|
|
for (j = 0; j < info[i].n_contexts; j++) {
|
|
|
|
g_free(info[i].contexts[j]);
|
|
|
|
}
|
|
|
|
g_free(info[i].contexts);
|
2019-01-17 20:36:54 +01:00
|
|
|
}
|
|
|
|
g_free(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nbd_receive_export_list:
|
|
|
|
* Query details about a server's exports, then disconnect without
|
|
|
|
* going into transmission phase. Return a count of the exports listed
|
|
|
|
* in @info by the server, or -1 on error. Caller must free @info using
|
|
|
|
* nbd_free_export_list().
|
|
|
|
*/
|
|
|
|
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
|
|
|
|
const char *hostname, NBDExportInfo **info,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
int count = 0;
|
|
|
|
int i;
|
|
|
|
int rc;
|
|
|
|
int ret = -1;
|
|
|
|
NBDExportInfo *array = NULL;
|
|
|
|
QIOChannel *sioc = NULL;
|
|
|
|
|
|
|
|
*info = NULL;
|
2023-08-29 19:58:28 +02:00
|
|
|
result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc,
|
2023-09-25 21:22:39 +02:00
|
|
|
NBD_MODE_EXTENDED, NULL, errp);
|
2019-01-17 20:36:54 +01:00
|
|
|
if (tlscreds && sioc) {
|
|
|
|
ioc = sioc;
|
|
|
|
}
|
2023-06-08 15:56:37 +02:00
|
|
|
if (result < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
2019-01-17 20:36:54 +01:00
|
|
|
|
2023-06-08 15:56:37 +02:00
|
|
|
switch ((NBDMode)result) {
|
|
|
|
case NBD_MODE_SIMPLE:
|
|
|
|
case NBD_MODE_STRUCTURED:
|
2023-09-25 21:22:39 +02:00
|
|
|
case NBD_MODE_EXTENDED:
|
2019-01-17 20:36:54 +01:00
|
|
|
/* newstyle - use NBD_OPT_LIST to populate array, then try
|
|
|
|
* NBD_OPT_INFO on each array member. If structured replies
|
|
|
|
* are enabled, also try NBD_OPT_LIST_META_CONTEXT. */
|
|
|
|
if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
while (1) {
|
|
|
|
char *name;
|
|
|
|
char *desc;
|
|
|
|
|
|
|
|
rc = nbd_receive_list(ioc, &name, &desc, errp);
|
|
|
|
if (rc < 0) {
|
|
|
|
goto out;
|
|
|
|
} else if (rc == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
array = g_renew(NBDExportInfo, array, ++count);
|
|
|
|
memset(&array[count - 1], 0, sizeof(*array));
|
|
|
|
array[count - 1].name = name;
|
|
|
|
array[count - 1].description = desc;
|
2023-08-29 19:58:28 +02:00
|
|
|
array[count - 1].mode = result;
|
2019-01-17 20:36:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
array[i].request_sizes = true;
|
|
|
|
rc = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, &array[i], errp);
|
|
|
|
if (rc < 0) {
|
|
|
|
goto out;
|
|
|
|
} else if (rc == 0) {
|
|
|
|
/*
|
|
|
|
* Pointless to try rest of loop. If OPT_INFO doesn't work,
|
|
|
|
* it's unlikely that meta contexts work either
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-09-25 21:22:39 +02:00
|
|
|
if (result >= NBD_MODE_STRUCTURED &&
|
2019-01-17 20:36:55 +01:00
|
|
|
nbd_list_meta_contexts(ioc, &array[i], errp) < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
2019-01-17 20:36:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Send NBD_OPT_ABORT as a courtesy before hanging up */
|
|
|
|
nbd_send_opt_abort(ioc);
|
|
|
|
break;
|
2023-06-08 15:56:37 +02:00
|
|
|
case NBD_MODE_EXPORT_NAME:
|
2019-01-17 20:36:54 +01:00
|
|
|
error_setg(errp, "Server does not support export lists");
|
|
|
|
/* We can't even send NBD_OPT_ABORT, so merely hang up */
|
|
|
|
goto out;
|
2023-06-08 15:56:37 +02:00
|
|
|
case NBD_MODE_OLDSTYLE:
|
|
|
|
/* A lone export name is implied, but we can still parse length and flags */
|
2019-01-17 20:36:54 +01:00
|
|
|
array = g_new0(NBDExportInfo, 1);
|
|
|
|
array->name = g_strdup("");
|
2023-08-29 19:58:28 +02:00
|
|
|
array->mode = NBD_MODE_OLDSTYLE;
|
2019-01-17 20:36:54 +01:00
|
|
|
count = 1;
|
|
|
|
|
|
|
|
if (nbd_negotiate_finish_oldstyle(ioc, array, errp) < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send NBD_CMD_DISC as a courtesy to the server, but ignore all
|
|
|
|
* errors now that we have the information we wanted. */
|
|
|
|
if (nbd_drop(ioc, 124, NULL) == 0) {
|
2023-08-29 19:58:29 +02:00
|
|
|
NBDRequest request = { .type = NBD_CMD_DISC, .mode = result };
|
2019-01-17 20:36:54 +01:00
|
|
|
|
|
|
|
nbd_send_request(ioc, &request);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
2023-06-08 15:56:37 +02:00
|
|
|
g_assert_not_reached();
|
2019-01-17 20:36:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
*info = array;
|
|
|
|
array = NULL;
|
|
|
|
ret = count;
|
|
|
|
|
|
|
|
out:
|
|
|
|
qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
|
|
|
qio_channel_close(ioc, NULL);
|
|
|
|
object_unref(OBJECT(sioc));
|
|
|
|
nbd_free_export_list(array, count);
|
|
|
|
return ret;
|
|
|
|
}
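/*
 * Illustrative sketch, not part of the original file: one way a
 * short-lived lister (such as the qemu-nbd list mode described in the
 * commit message above) might consume nbd_receive_export_list().  The
 * function name dump_export_list is hypothetical, and the call
 * signature is assumed to match the declaration in include/block/nbd.h.
 */
static void dump_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                             Error **errp)
{
    NBDExportInfo *list = NULL;
    int i, count;

    count = nbd_receive_export_list(ioc, tlscreds, NULL, &list, errp);
    if (count < 0) {
        return; /* errp is already set */
    }
    for (i = 0; i < count; i++) {
        printf("export: '%s'\n", list[i].name);
        if (list[i].description && *list[i].description) {
            printf("  description: %s\n", list[i].description);
        }
    }
    nbd_free_export_list(list, count);
}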
|
|
|
|
|
2016-01-14 09:41:02 +01:00
|
|
|
#ifdef __linux__
|
2017-07-07 22:30:41 +02:00
|
|
|
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
|
2017-05-26 13:09:13 +02:00
|
|
|
Error **errp)
|
2016-01-14 09:41:02 +01:00
|
|
|
{
|
nbd: Implement NBD_INFO_BLOCK_SIZE on client
The upstream NBD Protocol has defined a new extension to allow
the server to advertise block sizes to the client, as well as
a way for the client to inform the server whether it intends to
obey block sizes.
When using the block layer as the client, we will obey block
sizes; but when used as 'qemu-nbd -c' to hand off to the
kernel nbd module as the client, we are still waiting for the
kernel to implement a way for us to learn if it will honor
block sizes (perhaps by an addition to sysfs, rather than an
ioctl), as well as any way to tell the kernel what additional
block sizes to obey (NBD_SET_BLKSIZE appears to be accurate
for the minimum size, but preferred and maximum sizes would
probably be new ioctl()s), so until then, we need to make our
request for block sizes conditional.
When using ioctl(NBD_SET_BLKSIZE) to hand off to the kernel,
use the minimum block size as the sector size if it is larger
than 512, which also has the nice effect of cooperating with
(non-qemu) servers that don't do read-modify-write when
exposing a block device with 4k sectors; it might also allow
us to visit a file larger than 2T on a 32-bit kernel.
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20170707203049.534-10-eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-07-07 22:30:49 +02:00
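/*
 * Worked example of the sizing logic below (illustrative numbers):
 * with info->min_block = 4096 and info->size = 10 GiB,
 * sector_size = MAX(512, 4096) = 4096 and
 * sectors = 10737418240 / 4096 = 2621440, which still fits in a
 * 32-bit unsigned long, so the handoff succeeds even on a 32-bit
 * kernel.
 */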
|
|
|
unsigned long sector_size = MAX(BDRV_SECTOR_SIZE, info->min_block);
|
|
|
|
unsigned long sectors = info->size / sector_size;
|
|
|
|
|
|
|
|
/* FIXME: Once the kernel module is patched to honor block sizes,
|
|
|
|
* and to advertise that fact to user space, we should update the
|
|
|
|
* hand-off to the kernel to use any block sizes we learned. */
|
|
|
|
assert(!info->request_sizes);
|
|
|
|
if (info->size / sector_size != sectors) {
|
2017-07-07 22:30:41 +02:00
|
|
|
error_setg(errp, "Export size %" PRIu64 " too large for 32-bit kernel",
|
|
|
|
info->size);
|
2016-05-12 00:39:40 +02:00
|
|
|
return -E2BIG;
|
|
|
|
}
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_init_set_socket();
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2016-05-12 00:39:40 +02:00
|
|
|
if (ioctl(fd, NBD_SET_SOCK, (unsigned long) sioc->fd) < 0) {
|
2016-01-14 09:41:02 +01:00
|
|
|
int serrno = errno;
|
2017-05-26 13:09:13 +02:00
|
|
|
error_setg(errp, "Failed to set NBD socket");
|
2016-01-14 09:41:02 +01:00
|
|
|
return -serrno;
|
|
|
|
}
|
|
|
|
|
2017-07-07 22:30:49 +02:00
|
|
|
trace_nbd_init_set_block_size(sector_size);
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2017-07-07 22:30:49 +02:00
|
|
|
if (ioctl(fd, NBD_SET_BLKSIZE, sector_size) < 0) {
|
2016-01-14 09:41:02 +01:00
|
|
|
int serrno = errno;
|
2017-05-26 13:09:13 +02:00
|
|
|
error_setg(errp, "Failed setting NBD block size");
|
2016-01-14 09:41:02 +01:00
|
|
|
return -serrno;
|
|
|
|
}
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_init_set_size(sectors);
|
2017-07-07 22:30:49 +02:00
|
|
|
if (info->size % sector_size) {
|
|
|
|
trace_nbd_init_trailing_bytes(info->size % sector_size);
|
2016-05-12 00:39:40 +02:00
|
|
|
}
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2016-05-12 00:39:40 +02:00
|
|
|
if (ioctl(fd, NBD_SET_SIZE_BLOCKS, sectors) < 0) {
|
2016-01-14 09:41:02 +01:00
|
|
|
int serrno = errno;
|
2017-05-26 13:09:13 +02:00
|
|
|
error_setg(errp, "Failed setting size (in blocks)");
|
2016-01-14 09:41:02 +01:00
|
|
|
return -serrno;
|
|
|
|
}
|
|
|
|
|
2017-07-07 22:30:41 +02:00
|
|
|
if (ioctl(fd, NBD_SET_FLAGS, (unsigned long) info->flags) < 0) {
|
2016-01-14 09:41:02 +01:00
|
|
|
if (errno == ENOTTY) {
|
2017-07-07 22:30:41 +02:00
|
|
|
int read_only = (info->flags & NBD_FLAG_READ_ONLY) != 0;
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_init_set_readonly();
|
2016-01-14 09:41:02 +01:00
|
|
|
|
|
|
|
if (ioctl(fd, BLKROSET, (unsigned long) &read_only) < 0) {
|
|
|
|
int serrno = errno;
|
2017-05-26 13:09:13 +02:00
|
|
|
error_setg(errp, "Failed setting read-only attribute");
|
2016-01-14 09:41:02 +01:00
|
|
|
return -serrno;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
int serrno = errno;
|
2017-05-26 13:09:13 +02:00
|
|
|
error_setg(errp, "Failed setting flags");
|
2016-01-14 09:41:02 +01:00
|
|
|
return -serrno;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_init_finish();
|
2016-01-14 09:41:02 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int nbd_client(int fd)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int serrno;
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_client_loop();
|
2016-01-14 09:41:02 +01:00
|
|
|
|
|
|
|
ret = ioctl(fd, NBD_DO_IT);
|
|
|
|
if (ret < 0 && errno == EPIPE) {
|
|
|
|
/* NBD_DO_IT normally returns EPIPE when someone has disconnected
|
|
|
|
* the socket via NBD_DISCONNECT. We do not want to return 1 in
|
|
|
|
* that case.
|
|
|
|
*/
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
serrno = errno;
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_client_loop_ret(ret, strerror(serrno));
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_client_clear_queue();
|
2016-01-14 09:41:02 +01:00
|
|
|
ioctl(fd, NBD_CLEAR_QUE);
|
|
|
|
|
2017-07-07 17:29:18 +02:00
|
|
|
trace_nbd_client_clear_socket();
|
2016-01-14 09:41:02 +01:00
|
|
|
ioctl(fd, NBD_CLEAR_SOCK);
|
|
|
|
|
|
|
|
errno = serrno;
|
|
|
|
return ret;
|
|
|
|
}
|
2016-05-12 00:39:39 +02:00
|
|
|
|
|
|
|
int nbd_disconnect(int fd)
|
|
|
|
{
|
|
|
|
ioctl(fd, NBD_CLEAR_QUE);
|
|
|
|
ioctl(fd, NBD_DISCONNECT);
|
|
|
|
ioctl(fd, NBD_CLEAR_SOCK);
|
|
|
|
return 0;
|
|
|
|
}
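/*
 * Illustrative sketch, not part of the original file: the sequence a
 * client program typically follows with the helpers above.  The
 * function name attach_to_kernel and the "/dev/nbd0" device node are
 * examples only.
 */
static int attach_to_kernel(QIOChannelSocket *sioc, NBDExportInfo *info,
                            Error **errp)
{
    int fd = open("/dev/nbd0", O_RDWR);

    if (fd < 0) {
        error_setg_errno(errp, errno, "Failed to open /dev/nbd0");
        return -1;
    }
    if (nbd_init(fd, sioc, info, errp) < 0) {
        close(fd);
        return -1;
    }
    /* Blocks in NBD_DO_IT until another process opens the same device
     * and calls nbd_disconnect() on its own descriptor. */
    if (nbd_client(fd) < 0) {
        close(fd);
        return -1;
    }
    close(fd);
    return 0;
}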
|
|
|
|
|
2018-12-15 14:53:08 +01:00
|
|
|
#endif /* __linux__ */
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2017-08-04 17:14:27 +02:00
|
|
|
int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
|
2016-01-14 09:41:02 +01:00
|
|
|
{
|
2023-09-25 21:22:37 +02:00
|
|
|
uint8_t buf[NBD_EXTENDED_REQUEST_SIZE];
|
|
|
|
size_t len;
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2023-06-08 15:56:34 +02:00
|
|
|
trace_nbd_send_request(request->from, request->len, request->cookie,
|
2017-07-17 21:26:34 +02:00
|
|
|
request->flags, request->type,
|
|
|
|
nbd_cmd_lookup(request->type));
|
2016-04-06 05:35:04 +02:00
|
|
|
|
2016-10-14 20:33:04 +02:00
|
|
|
stw_be_p(buf + 4, request->flags);
|
|
|
|
stw_be_p(buf + 6, request->type);
|
2023-06-08 15:56:34 +02:00
|
|
|
stq_be_p(buf + 8, request->cookie);
|
2016-06-10 18:15:42 +02:00
|
|
|
stq_be_p(buf + 16, request->from);
|
2023-09-25 21:22:37 +02:00
|
|
|
if (request->mode >= NBD_MODE_EXTENDED) {
|
|
|
|
stl_be_p(buf, NBD_EXTENDED_REQUEST_MAGIC);
|
|
|
|
stq_be_p(buf + 24, request->len);
|
|
|
|
len = NBD_EXTENDED_REQUEST_SIZE;
|
|
|
|
} else {
|
|
|
|
assert(request->len <= UINT32_MAX);
|
|
|
|
stl_be_p(buf, NBD_REQUEST_MAGIC);
|
|
|
|
stl_be_p(buf + 24, request->len);
|
|
|
|
len = NBD_REQUEST_SIZE;
|
|
|
|
}
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2023-09-25 21:22:37 +02:00
|
|
|
return nbd_write(ioc, buf, len, NULL);
|
2016-01-14 09:41:02 +01:00
|
|
|
}
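/*
 * For reference, the compact request header serialized above is
 * NBD_REQUEST_SIZE (28) bytes:
 *   [ 0.. 3] NBD_REQUEST_MAGIC
 *   [ 4.. 5] flags
 *   [ 6.. 7] type
 *   [ 8..15] cookie
 *   [16..23] from (offset)
 *   [24..27] len (32 bits)
 * The extended form keeps the same layout through byte 23, but uses
 * NBD_EXTENDED_REQUEST_MAGIC and widens len to 64 bits at [24..31],
 * for NBD_EXTENDED_REQUEST_SIZE (32) bytes total.
 */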
|
|
|
|
|
2017-10-27 12:40:35 +02:00
|
|
|
/* nbd_receive_simple_reply
|
|
|
|
* Read a simple reply except the magic field (which must already be read).
|
|
|
|
* Payload is not read (a payload is possible for CMD_READ, but at this point
|
|
|
|
* we don't even know whether one is present).
|
|
|
|
*/
|
|
|
|
static int nbd_receive_simple_reply(QIOChannel *ioc, NBDSimpleReply *reply,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
assert(reply->magic == NBD_SIMPLE_REPLY_MAGIC);
|
|
|
|
|
|
|
|
ret = nbd_read(ioc, (uint8_t *)reply + sizeof(reply->magic),
|
2019-01-28 17:58:30 +01:00
|
|
|
sizeof(*reply) - sizeof(reply->magic), "reply", errp);
|
2017-10-27 12:40:35 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-09-27 18:42:00 +02:00
|
|
|
reply->error = be32_to_cpu(reply->error);
|
2023-06-08 15:56:34 +02:00
|
|
|
reply->cookie = be64_to_cpu(reply->cookie);
|
2017-10-27 12:40:35 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-09-25 21:22:37 +02:00
|
|
|
/* nbd_receive_reply_chunk_header
|
2017-10-27 12:40:35 +02:00
|
|
|
* Read a structured reply chunk, except for the magic field (which must
|
2023-09-25 21:22:37 +02:00
|
|
|
* already have been read). Normalize into the compact form.
|
2017-10-27 12:40:35 +02:00
|
|
|
* Payload is not read.
|
|
|
|
*/
|
2023-09-25 21:22:37 +02:00
|
|
|
static int nbd_receive_reply_chunk_header(QIOChannel *ioc, NBDReply *chunk,
|
|
|
|
Error **errp)
|
2017-10-27 12:40:35 +02:00
|
|
|
{
|
|
|
|
int ret;
|
2023-09-25 21:22:37 +02:00
|
|
|
size_t len;
|
|
|
|
uint64_t payload_len;
|
2017-10-27 12:40:35 +02:00
|
|
|
|
2023-09-25 21:22:37 +02:00
|
|
|
if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
|
|
|
|
len = sizeof(chunk->structured);
|
|
|
|
} else {
|
|
|
|
assert(chunk->magic == NBD_EXTENDED_REPLY_MAGIC);
|
|
|
|
len = sizeof(chunk->extended);
|
|
|
|
}
|
2017-10-27 12:40:35 +02:00
|
|
|
|
|
|
|
ret = nbd_read(ioc, (uint8_t *)chunk + sizeof(chunk->magic),
|
2023-09-25 21:22:37 +02:00
|
|
|
len - sizeof(chunk->magic), "structured chunk",
|
2019-01-28 17:58:30 +01:00
|
|
|
errp);
|
2017-10-27 12:40:35 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-09-25 21:22:37 +02:00
|
|
|
/* flags, type, and cookie occupy the same space in both forms */
|
|
|
|
chunk->structured.flags = be16_to_cpu(chunk->structured.flags);
|
|
|
|
chunk->structured.type = be16_to_cpu(chunk->structured.type);
|
|
|
|
chunk->structured.cookie = be64_to_cpu(chunk->structured.cookie);
|
2017-10-27 12:40:35 +02:00
|
|
|
|
2023-06-08 15:56:36 +02:00
|
|
|
/*
|
|
|
|
* Because we use BLOCK_STATUS with REQ_ONE, and cap READ requests
|
|
|
|
* at 32M, no valid server should send us payload larger than
|
|
|
|
* this. Even if we stopped using REQ_ONE, sane servers will cap
|
|
|
|
* the number of extents they return for block status.
|
|
|
|
*/
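/* Concretely (illustrative arithmetic): NBD_MAX_BUFFER_SIZE is 32 MiB
 * and sizeof(NBDStructuredReadData) is 8 bytes (the offset field), so
 * the cap enforced below is 33554440 bytes. */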
|
2023-09-25 21:22:37 +02:00
|
|
|
if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
|
|
|
|
payload_len = be32_to_cpu(chunk->structured.length);
|
|
|
|
} else {
|
|
|
|
/* For now, we are ignoring the extended header offset. */
|
|
|
|
payload_len = be64_to_cpu(chunk->extended.length);
|
|
|
|
chunk->magic = NBD_STRUCTURED_REPLY_MAGIC;
|
|
|
|
}
|
|
|
|
if (payload_len > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
|
2023-06-08 15:56:36 +02:00
|
|
|
error_setg(errp, "server chunk %" PRIu32 " (%s) payload is too long",
|
2023-09-25 21:22:37 +02:00
|
|
|
chunk->structured.type,
|
|
|
|
nbd_reply_type_lookup(chunk->structured.type));
|
2023-06-08 15:56:36 +02:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2023-09-25 21:22:37 +02:00
|
|
|
chunk->structured.length = payload_len;
|
2023-06-08 15:56:36 +02:00
|
|
|
|
2017-10-27 12:40:35 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-18 14:38:15 +01:00
|
|
|
/* nbd_read_eof
|
|
|
|
* Tries to read @size bytes from @ioc.
|
|
|
|
* Returns 1 on success
|
|
|
|
* 0 on eof, when no data was read (errp is not set)
|
|
|
|
* negative errno on failure (errp is set)
|
|
|
|
*/
|
|
|
|
static inline int coroutine_fn
|
2019-02-18 14:56:01 +01:00
|
|
|
nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size,
|
|
|
|
Error **errp)
|
2019-02-18 14:38:15 +01:00
|
|
|
{
|
2019-02-18 14:56:01 +01:00
|
|
|
bool partial = false;
|
2019-02-18 14:38:15 +01:00
|
|
|
|
|
|
|
assert(size);
|
2019-02-18 14:56:01 +01:00
|
|
|
while (size > 0) {
|
|
|
|
struct iovec iov = { .iov_base = buffer, .iov_len = size };
|
|
|
|
ssize_t len;
|
|
|
|
|
|
|
|
len = qio_channel_readv(ioc, &iov, 1, errp);
|
|
|
|
if (len == QIO_CHANNEL_ERR_BLOCK) {
|
|
|
|
qio_channel_yield(ioc, G_IO_IN);
|
|
|
|
continue;
|
|
|
|
} else if (len < 0) {
|
|
|
|
return -EIO;
|
|
|
|
} else if (len == 0) {
|
|
|
|
if (partial) {
|
|
|
|
error_setg(errp,
|
|
|
|
"Unexpected end-of-file before all bytes were read");
|
|
|
|
return -EIO;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
partial = true;
|
|
|
|
size -= len;
|
|
|
|
buffer = (uint8_t *) buffer + len;
|
2019-02-18 14:38:15 +01:00
|
|
|
}
|
2019-02-18 14:56:01 +01:00
|
|
|
return 1;
|
2019-02-18 14:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-08-04 17:14:26 +02:00
|
|
|
/* nbd_receive_reply
|
2019-02-18 14:56:01 +01:00
|
|
|
*
|
2023-09-25 21:22:37 +02:00
|
|
|
* Wait for a new reply. If this yields, the coroutine must be able to be
|
|
|
|
* safely reentered for nbd_client_attach_aio_context(). @mode determines
|
|
|
|
* which reply magic we are expecting, although this normalizes the result
|
|
|
|
* so that the caller only has to work with compact headers.
|
2019-02-18 14:56:01 +01:00
|
|
|
*
|
2017-08-04 17:14:26 +02:00
|
|
|
* Returns 1 on success
|
2023-09-25 21:22:37 +02:00
|
|
|
* 0 on eof, when no data was read
|
|
|
|
* negative errno on failure
|
2017-08-04 17:14:26 +02:00
|
|
|
*/
|
2019-02-18 14:56:01 +01:00
|
|
|
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
|
2023-09-25 21:22:37 +02:00
|
|
|
NBDReply *reply, NBDMode mode, Error **errp)
|
2016-01-14 09:41:02 +01:00
|
|
|
{
|
2017-08-04 17:14:26 +02:00
|
|
|
int ret;
|
2017-11-08 22:56:59 +01:00
|
|
|
const char *type;
|
2023-09-25 21:22:37 +02:00
|
|
|
uint32_t expected;
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2019-02-18 14:56:01 +01:00
|
|
|
ret = nbd_read_eof(bs, ioc, &reply->magic, sizeof(reply->magic), errp);
|
2017-02-13 14:52:24 +01:00
|
|
|
if (ret <= 0) {
|
2016-01-14 09:41:02 +01:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-09-27 18:42:00 +02:00
|
|
|
reply->magic = be32_to_cpu(reply->magic);
|
2016-01-14 09:41:02 +01:00
|
|
|
|
2023-09-25 21:22:37 +02:00
|
|
|
/* Diagnose but accept wrong-width header */
|
2017-10-27 12:40:35 +02:00
|
|
|
switch (reply->magic) {
|
|
|
|
case NBD_SIMPLE_REPLY_MAGIC:
|
2023-09-25 21:22:37 +02:00
|
|
|
if (mode >= NBD_MODE_EXTENDED) {
|
|
|
|
trace_nbd_receive_wrong_header(reply->magic,
|
|
|
|
nbd_mode_lookup(mode));
|
|
|
|
}
|
2017-10-27 12:40:35 +02:00
|
|
|
ret = nbd_receive_simple_reply(ioc, &reply->simple, errp);
|
|
|
|
if (ret < 0) {
|
2023-09-25 21:22:37 +02:00
|
|
|
return ret;
|
2017-10-27 12:40:35 +02:00
|
|
|
}
|
|
|
|
trace_nbd_receive_simple_reply(reply->simple.error,
|
|
|
|
nbd_err_lookup(reply->simple.error),
|
2023-06-08 15:56:34 +02:00
|
|
|
reply->cookie);
|
2017-10-27 12:40:35 +02:00
|
|
|
break;
|
|
|
|
case NBD_STRUCTURED_REPLY_MAGIC:
|
2023-09-25 21:22:37 +02:00
|
|
|
case NBD_EXTENDED_REPLY_MAGIC:
|
|
|
|
expected = mode >= NBD_MODE_EXTENDED ? NBD_EXTENDED_REPLY_MAGIC
|
|
|
|
: NBD_STRUCTURED_REPLY_MAGIC;
|
|
|
|
if (reply->magic != expected) {
|
|
|
|
trace_nbd_receive_wrong_header(reply->magic,
|
|
|
|
nbd_mode_lookup(mode));
|
|
|
|
}
|
|
|
|
ret = nbd_receive_reply_chunk_header(ioc, reply, errp);
|
2017-10-27 12:40:35 +02:00
|
|
|
if (ret < 0) {
|
2023-09-25 21:22:37 +02:00
|
|
|
return ret;
|
2017-10-27 12:40:35 +02:00
|
|
|
}
|
2017-11-08 22:56:59 +01:00
|
|
|
type = nbd_reply_type_lookup(reply->structured.type);
|
2023-09-25 21:22:37 +02:00
|
|
|
trace_nbd_receive_reply_chunk_header(reply->structured.flags,
|
|
|
|
reply->structured.type, type,
|
|
|
|
reply->structured.cookie,
|
|
|
|
reply->structured.length);
|
2017-10-27 12:40:35 +02:00
|
|
|
break;
|
|
|
|
default:
|
2023-09-25 21:22:37 +02:00
|
|
|
trace_nbd_receive_wrong_header(reply->magic, nbd_mode_lookup(mode));
|
2017-10-27 12:40:35 +02:00
|
|
|
error_setg(errp, "invalid magic (got 0x%" PRIx32 ")", reply->magic);
|
2016-10-14 20:33:16 +02:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-08-04 17:14:26 +02:00
|
|
|
|
|
|
|
return 1;
|
2016-01-14 09:41:02 +01:00
|
|
|
}
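/*
 * Illustrative sketch, not part of the original file: how a caller
 * might dispatch on the reply normalized above.  The function name
 * example_read_one_reply is hypothetical; nbd_reply_is_simple() is
 * the inline helper from include/block/nbd.h.
 */
static int coroutine_fn
example_read_one_reply(BlockDriverState *bs, QIOChannel *ioc,
                       NBDMode mode, Error **errp)
{
    NBDReply reply;
    int ret = nbd_receive_reply(bs, ioc, &reply, mode, errp);

    if (ret <= 0) {
        return ret; /* 0 on clean EOF, negative errno on failure */
    }
    if (nbd_reply_is_simple(&reply)) {
        /* Final answer: check reply.simple.error, match reply.cookie */
    } else {
        /* One chunk of a structured answer; more chunks follow until
         * one arrives with NBD_REPLY_FLAG_DONE set in
         * reply.structured.flags. */
    }
    return 1;
}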
|
|
|
|
|