qemu-e2k/block/quorum.c

/*
* Quorum Block filter
*
* Copyright (C) 2012-2014 Nodalink, EURL.
*
* Author:
* Benoît Canet <benoit.canet@irqsave.net>
*
* Based on the design and code of blkverify.c (Copyright (C) 2010 IBM, Corp)
* and blkmirror.c (Copyright (C) 2011 Red Hat, Inc).
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qstring.h"
#include "crypto/hash.h"
#define HASH_LENGTH 32
#define INDEXSTR_LEN 32
#define QUORUM_OPT_VOTE_THRESHOLD "vote-threshold"
#define QUORUM_OPT_BLKVERIFY "blkverify"
#define QUORUM_OPT_REWRITE "rewrite-corrupted"
#define QUORUM_OPT_READ_PATTERN "read-pattern"
/* This union holds a vote hash value */
typedef union QuorumVoteValue {
uint8_t h[HASH_LENGTH]; /* SHA-256 hash */
int64_t l; /* simpler 64 bits hash */
} QuorumVoteValue;
/* A vote item */
typedef struct QuorumVoteItem {
int index;
QLIST_ENTRY(QuorumVoteItem) next;
} QuorumVoteItem;
/* This structure is a vote version. A version is the set of votes sharing the
* same vote value.
* The votes belonging to a version are tracked in the items field and their
* count in vote_count.
*/
typedef struct QuorumVoteVersion {
QuorumVoteValue value;
int index;
int vote_count;
QLIST_HEAD(, QuorumVoteItem) items;
QLIST_ENTRY(QuorumVoteVersion) next;
} QuorumVoteVersion;
/* this structure holds a group of vote versions together */
typedef struct QuorumVotes {
QLIST_HEAD(, QuorumVoteVersion) vote_list;
bool (*compare)(QuorumVoteValue *a, QuorumVoteValue *b);
} QuorumVotes;
/* the following structure holds the state of one quorum instance */
typedef struct BDRVQuorumState {
BdrvChild **children; /* children BlockDriverStates */
int num_children; /* children count */
unsigned next_child_index; /* the index of the next child that should
* be added
*/
int threshold; /* if fewer than threshold children return the same
* read result, a quorum error occurs.
*/
bool is_blkverify; /* true if the driver is in blkverify mode.
* Writes are mirrored on two child devices.
* On reads the two children's contents are
* compared and, if a difference is spotted, its
* location is printed and the code aborts.
* This is useful for debugging other block drivers
* by comparing them with a reference one.
*/
bool rewrite_corrupted;/* true if the driver must rewrite corrupted blocks
* detected during a read once quorum is reached.
*/
QuorumReadPattern read_pattern;
} BDRVQuorumState;
typedef struct QuorumAIOCB QuorumAIOCB;
/* Quorum will create one instance of the following structure per operation it
* performs on its children.
* So for each read/write operation coming from the upper layer there will be
* $children_count QuorumChildRequest.
*/
typedef struct QuorumChildRequest {
BlockDriverState *bs;
QEMUIOVector qiov;
uint8_t *buf;
int ret;
QuorumAIOCB *parent;
} QuorumChildRequest;
/* Quorum will use the following structure to track the progress of each
* read/write operation received from the upper layer.
* This structure holds pointers to the QuorumChildRequest instances used to
* perform the operation on each child and to track overall progress.
*/
struct QuorumAIOCB {
BlockDriverState *bs;
Coroutine *co;
/* Request metadata */
uint64_t offset;
uint64_t bytes;
int flags;
QEMUIOVector *qiov; /* calling IOV */
QuorumChildRequest *qcrs; /* individual child requests */
int count; /* number of completed AIOCB */
int success_count; /* number of successfully completed AIOCB */
int rewrite_count; /* number of replicas to rewrite: counted down to
* zero as the corrective writes complete
*/
QuorumVotes votes;
bool is_read;
int vote_ret;
int children_read; /* how many children have been read from */
};
typedef struct QuorumCo {
QuorumAIOCB *acb;
int idx;
} QuorumCo;
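/* Free a QuorumAIOCB and its per-child requests */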
static void quorum_aio_finalize(QuorumAIOCB *acb)
{
g_free(acb->qcrs);
g_free(acb);
}
static bool quorum_sha256_compare(QuorumVoteValue *a, QuorumVoteValue *b)
{
return !memcmp(a->h, b->h, HASH_LENGTH);
}
static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b)
{
return a->l == b->l;
}
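/* Allocate a QuorumAIOCB and one QuorumChildRequest per child for a request */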
static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs,
QEMUIOVector *qiov,
uint64_t offset,
uint64_t bytes,
int flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = g_new(QuorumAIOCB, 1);
int i;
*acb = (QuorumAIOCB) {
.co = qemu_coroutine_self(),
.bs = bs,
.offset = offset,
.bytes = bytes,
.flags = flags,
.qiov = qiov,
.votes.compare = quorum_sha256_compare,
.votes.vote_list = QLIST_HEAD_INITIALIZER(acb.votes.vote_list),
};
acb->qcrs = g_new0(QuorumChildRequest, s->num_children);
for (i = 0; i < s->num_children; i++) {
acb->qcrs[i].buf = NULL;
acb->qcrs[i].ret = 0;
acb->qcrs[i].parent = acb;
}
return acb;
}
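/* Emit a QUORUM_REPORT_BAD QMP event for the given child node */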
static void quorum_report_bad(QuorumOpType type, uint64_t offset,
uint64_t bytes, char *node_name, int ret)
{
const char *msg = NULL;
int64_t start_sector = offset / BDRV_SECTOR_SIZE;
int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
if (ret < 0) {
msg = strerror(-ret);
}
qapi_event_send_quorum_report_bad(type, !!msg, msg, node_name, start_sector,
end_sector - start_sector);
}
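/* Emit a QUORUM_FAILURE QMP event covering the whole request */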
static void quorum_report_failure(QuorumAIOCB *acb)
{
const char *reference = bdrv_get_device_or_node_name(acb->bs);
int64_t start_sector = acb->offset / BDRV_SECTOR_SIZE;
int64_t end_sector = DIV_ROUND_UP(acb->offset + acb->bytes,
BDRV_SECTOR_SIZE);
qapi_event_send_quorum_failure(reference, start_sector,
end_sector - start_sector);
}
static int quorum_vote_error(QuorumAIOCB *acb);
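/* If fewer children succeeded than the configured threshold, record the error
* vote, report the failure and return true */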
static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
{
BDRVQuorumState *s = acb->bs->opaque;
if (acb->success_count < s->threshold) {
acb->vote_ret = quorum_vote_error(acb);
quorum_report_failure(acb);
return true;
}
return false;
}
static int read_fifo_child(QuorumAIOCB *acb);
static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source)
{
int i;
assert(dest->niov == source->niov);
assert(dest->size == source->size);
for (i = 0; i < source->niov; i++) {
assert(dest->iov[i].iov_len == source->iov[i].iov_len);
memcpy(dest->iov[i].iov_base,
source->iov[i].iov_base,
source->iov[i].iov_len);
}
}
static void quorum_report_bad_acb(QuorumChildRequest *sacb, int ret)
{
QuorumAIOCB *acb = sacb->parent;
QuorumOpType type = acb->is_read ? QUORUM_OP_TYPE_READ : QUORUM_OP_TYPE_WRITE;
quorum_report_bad(type, acb->offset, acb->bytes, sacb->bs->node_name, ret);
}
static void quorum_report_bad_versions(BDRVQuorumState *s,
QuorumAIOCB *acb,
QuorumVoteValue *value)
{
QuorumVoteVersion *version;
QuorumVoteItem *item;
QLIST_FOREACH(version, &acb->votes.vote_list, next) {
if (acb->votes.compare(&version->value, value)) {
continue;
}
QLIST_FOREACH(item, &version->items, next) {
quorum_report_bad(QUORUM_OP_TYPE_READ, acb->offset, acb->bytes,
s->children[item->index]->bs->node_name, 0);
}
}
}
static void quorum_rewrite_entry(void *opaque)
{
QuorumCo *co = opaque;
QuorumAIOCB *acb = co->acb;
BDRVQuorumState *s = acb->bs->opaque;
/* Ignore any errors, it's just a correction attempt for already
* corrupted data.
* Mask out BDRV_REQ_WRITE_UNCHANGED because this overwrites the
* area with different data from the other children. */
bdrv_co_pwritev(s->children[co->idx], acb->offset, acb->bytes,
acb->qiov, acb->flags & ~BDRV_REQ_WRITE_UNCHANGED);
/* Wake up the caller after the last rewrite */
acb->rewrite_count--;
if (!acb->rewrite_count) {
qemu_coroutine_enter_if_inactive(acb->co);
}
}
static bool quorum_rewrite_bad_versions(QuorumAIOCB *acb,
QuorumVoteValue *value)
{
QuorumVoteVersion *version;
QuorumVoteItem *item;
int count = 0;
/* first count the number of bad versions: done first to avoid concurrency
* issues.
*/
QLIST_FOREACH(version, &acb->votes.vote_list, next) {
if (acb->votes.compare(&version->value, value)) {
continue;
}
QLIST_FOREACH(item, &version->items, next) {
count++;
}
}
/* quorum_rewrite_entry will count down this to zero */
acb->rewrite_count = count;
/* now fire the correcting rewrites */
QLIST_FOREACH(version, &acb->votes.vote_list, next) {
if (acb->votes.compare(&version->value, value)) {
continue;
}
QLIST_FOREACH(item, &version->items, next) {
Coroutine *co;
QuorumCo data = {
.acb = acb,
.idx = item->index,
};
co = qemu_coroutine_create(quorum_rewrite_entry, &data);
qemu_coroutine_enter(co);
}
}
/* return true if any rewrite was started, else false */
return count;
}
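/* Register a vote for @value cast by child @index, creating a new vote version
* if this value has not been seen before */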
static void quorum_count_vote(QuorumVotes *votes,
QuorumVoteValue *value,
int index)
{
QuorumVoteVersion *v = NULL, *version = NULL;
QuorumVoteItem *item;
/* check whether a version with this hash already exists */
QLIST_FOREACH(v, &votes->vote_list, next) {
if (votes->compare(&v->value, value)) {
version = v;
break;
}
}
/* This version is not yet in the list; add it */
if (!version) {
version = g_new0(QuorumVoteVersion, 1);
QLIST_INIT(&version->items);
memcpy(&version->value, value, sizeof(version->value));
version->index = index;
version->vote_count = 0;
QLIST_INSERT_HEAD(&votes->vote_list, version, next);
}
version->vote_count++;
item = g_new0(QuorumVoteItem, 1);
item->index = index;
QLIST_INSERT_HEAD(&version->items, item, next);
}
static void quorum_free_vote_list(QuorumVotes *votes)
{
QuorumVoteVersion *version, *next_version;
QuorumVoteItem *item, *next_item;
QLIST_FOREACH_SAFE(version, &votes->vote_list, next, next_version) {
QLIST_REMOVE(version, next);
QLIST_FOREACH_SAFE(item, &version->items, next, next_item) {
QLIST_REMOVE(item, next);
g_free(item);
}
g_free(version);
}
}
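/* Compute the SHA-256 hash of the data read from child @i */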
static int quorum_compute_hash(QuorumAIOCB *acb, int i, QuorumVoteValue *hash)
{
QEMUIOVector *qiov = &acb->qcrs[i].qiov;
size_t len = sizeof(hash->h);
uint8_t *data = hash->h;
/* XXX - would be nice if we could pass in the Error **
* and propagate that back, but this quorum code is
* restricted to just errno values currently */
if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256,
qiov->iov, qiov->niov,
&data, &len,
NULL) < 0) {
return -EINVAL;
}
return 0;
}
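/* Return the vote version with the highest vote count */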
static QuorumVoteVersion *quorum_get_vote_winner(QuorumVotes *votes)
{
int max = 0;
QuorumVoteVersion *candidate, *winner = NULL;
QLIST_FOREACH(candidate, &votes->vote_list, next) {
if (candidate->vote_count > max) {
max = candidate->vote_count;
winner = candidate;
}
}
return winner;
}
/* qemu_iovec_compare is handy for blkverify mode because it returns the first
* differing byte location. Yet it is handcoded to compare vectors one byte
* after another so it does not benefit from the libc SIMD optimizations.
* quorum_iovec_compare is written for speed and should be used in the non
* blkverify mode of quorum.
*/
static bool quorum_iovec_compare(QEMUIOVector *a, QEMUIOVector *b)
{
int i;
int result;
assert(a->niov == b->niov);
for (i = 0; i < a->niov; i++) {
assert(a->iov[i].iov_len == b->iov[i].iov_len);
result = memcmp(a->iov[i].iov_base,
b->iov[i].iov_base,
a->iov[i].iov_len);
if (result) {
return false;
}
}
return true;
}
static bool quorum_compare(QuorumAIOCB *acb, QEMUIOVector *a, QEMUIOVector *b)
{
BDRVQuorumState *s = acb->bs->opaque;
ssize_t offset;
/* This driver will replace blkverify in this particular case */
if (s->is_blkverify) {
offset = qemu_iovec_compare(a, b);
if (offset != -1) {
fprintf(stderr, "quorum: offset=%" PRIu64 " bytes=%" PRIu64
" contents mismatch at offset %" PRIu64 "\n",
acb->offset, acb->bytes, acb->offset + offset);
exit(1);
}
return true;
}
return quorum_iovec_compare(a, b);
}
/* Do a vote to get the error code */
static int quorum_vote_error(QuorumAIOCB *acb)
{
BDRVQuorumState *s = acb->bs->opaque;
QuorumVoteVersion *winner = NULL;
QuorumVotes error_votes;
QuorumVoteValue result_value;
int i, ret = 0;
bool error = false;
QLIST_INIT(&error_votes.vote_list);
error_votes.compare = quorum_64bits_compare;
for (i = 0; i < s->num_children; i++) {
ret = acb->qcrs[i].ret;
if (ret) {
error = true;
result_value.l = ret;
quorum_count_vote(&error_votes, &result_value, i);
}
}
if (error) {
winner = quorum_get_vote_winner(&error_votes);
ret = winner->value.l;
}
quorum_free_vote_list(&error_votes);
return ret;
}
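/* Vote on the data read from the children: if all successful reads agree, copy
* the data to the caller; otherwise hash each read, pick the most represented
* version and report (and optionally rewrite) the diverging children */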
static void quorum_vote(QuorumAIOCB *acb)
{
bool quorum = true;
int i, j, ret;
QuorumVoteValue hash;
BDRVQuorumState *s = acb->bs->opaque;
QuorumVoteVersion *winner;
if (quorum_has_too_much_io_failed(acb)) {
return;
}
/* get the index of the first successful read */
for (i = 0; i < s->num_children; i++) {
if (!acb->qcrs[i].ret) {
break;
}
}
assert(i < s->num_children);
/* compare this read with all other successful reads stopping at quorum
* failure
*/
for (j = i + 1; j < s->num_children; j++) {
if (acb->qcrs[j].ret) {
continue;
}
quorum = quorum_compare(acb, &acb->qcrs[i].qiov, &acb->qcrs[j].qiov);
if (!quorum) {
break;
}
}
/* Every successful read agrees */
if (quorum) {
quorum_copy_qiov(acb->qiov, &acb->qcrs[i].qiov);
return;
}
/* compute hashes for each successful read, also store indexes */
for (i = 0; i < s->num_children; i++) {
if (acb->qcrs[i].ret) {
continue;
}
ret = quorum_compute_hash(acb, i, &hash);
/* bail out if the hash computation failed */
if (ret < 0) {
acb->vote_ret = ret;
goto free_exit;
}
quorum_count_vote(&acb->votes, &hash, i);
}
/* vote to select the most represented version */
winner = quorum_get_vote_winner(&acb->votes);
/* if the winner count is smaller than threshold the read fails */
if (winner->vote_count < s->threshold) {
quorum_report_failure(acb);
acb->vote_ret = -EIO;
goto free_exit;
}
/* we have a winner: copy it */
quorum_copy_qiov(acb->qiov, &acb->qcrs[winner->index].qiov);
/* some versions are bad: report them */
quorum_report_bad_versions(s, acb, &winner->value);
/* corruption correction is enabled */
if (s->rewrite_corrupted) {
quorum_rewrite_bad_versions(acb, &winner->value);
}
free_exit:
/* free lists */
quorum_free_vote_list(&acb->votes);
}
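/* Coroutine entry point: read from one child into its private buffer and wake
* the caller once the last read has completed */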
static void read_quorum_children_entry(void *opaque)
{
QuorumCo *co = opaque;
QuorumAIOCB *acb = co->acb;
BDRVQuorumState *s = acb->bs->opaque;
int i = co->idx;
QuorumChildRequest *sacb = &acb->qcrs[i];
sacb->bs = s->children[i]->bs;
sacb->ret = bdrv_co_preadv(s->children[i], acb->offset, acb->bytes,
&acb->qcrs[i].qiov, 0);
if (sacb->ret == 0) {
acb->success_count++;
} else {
quorum_report_bad_acb(sacb, sacb->ret);
}
acb->count++;
assert(acb->count <= s->num_children);
assert(acb->success_count <= s->num_children);
/* Wake up the caller after the last read */
if (acb->count == s->num_children) {
qemu_coroutine_enter_if_inactive(acb->co);
}
}
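/* Read from all children in parallel, then vote on the results */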
static int read_quorum_children(QuorumAIOCB *acb)
{
BDRVQuorumState *s = acb->bs->opaque;
int i;
acb->children_read = s->num_children;
for (i = 0; i < s->num_children; i++) {
acb->qcrs[i].buf = qemu_blockalign(s->children[i]->bs, acb->qiov->size);
qemu_iovec_init(&acb->qcrs[i].qiov, acb->qiov->niov);
qemu_iovec_clone(&acb->qcrs[i].qiov, acb->qiov, acb->qcrs[i].buf);
}
for (i = 0; i < s->num_children; i++) {
Coroutine *co;
QuorumCo data = {
.acb = acb,
.idx = i,
};
co = qemu_coroutine_create(read_quorum_children_entry, &data);
qemu_coroutine_enter(co);
}
while (acb->count < s->num_children) {
qemu_coroutine_yield();
}
/* Do the vote on read */
quorum_vote(acb);
for (i = 0; i < s->num_children; i++) {
qemu_vfree(acb->qcrs[i].buf);
qemu_iovec_destroy(&acb->qcrs[i].qiov);
}
while (acb->rewrite_count) {
qemu_coroutine_yield();
}
return acb->vote_ret;
}
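/* Read from the children in FIFO order, trying the next child whenever a read
* fails */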
static int read_fifo_child(QuorumAIOCB *acb)
{
BDRVQuorumState *s = acb->bs->opaque;
int n, ret;
/* We try to read the next child in FIFO order if we failed to read */
do {
n = acb->children_read++;
acb->qcrs[n].bs = s->children[n]->bs;
ret = bdrv_co_preadv(s->children[n], acb->offset, acb->bytes,
acb->qiov, 0);
if (ret < 0) {
quorum_report_bad_acb(&acb->qcrs[n], ret);
}
} while (ret < 0 && acb->children_read < s->num_children);
/* FIXME: rewrite failed children if acb->children_read > 1? */
return ret;
}
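/* Dispatch a read either to the voting path or to the FIFO path, depending on
* the configured read pattern, e.g.
* -drive driver=quorum,children.0.file.filename=0.qcow2,\
* children.1.file.filename=1.qcow2,read-pattern=fifo,vote-threshold=1
* enables the FIFO pattern on two children. */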
static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
int ret;
acb->is_read = true;
acb->children_read = 0;
if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) {
ret = read_quorum_children(acb);
} else {
ret = read_fifo_child(acb);
}
quorum_aio_finalize(acb);
return ret;
}
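/* Coroutine entry point: write (or write zeroes) to one child and wake the
* caller once the last write has completed */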
static void write_quorum_entry(void *opaque)
{
QuorumCo *co = opaque;
QuorumAIOCB *acb = co->acb;
BDRVQuorumState *s = acb->bs->opaque;
int i = co->idx;
QuorumChildRequest *sacb = &acb->qcrs[i];
sacb->bs = s->children[i]->bs;
if (acb->flags & BDRV_REQ_ZERO_WRITE) {
sacb->ret = bdrv_co_pwrite_zeroes(s->children[i], acb->offset,
acb->bytes, acb->flags);
} else {
sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
acb->qiov, acb->flags);
}
if (sacb->ret == 0) {
acb->success_count++;
} else {
quorum_report_bad_acb(sacb, sacb->ret);
}
acb->count++;
assert(acb->count <= s->num_children);
assert(acb->success_count <= s->num_children);
/* Wake up the caller after the last write */
if (acb->count == s->num_children) {
qemu_coroutine_enter_if_inactive(acb->co);
}
}
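/* Mirror the write to all children; the request fails if fewer than threshold
* writes succeed */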
static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
int i, ret;
for (i = 0; i < s->num_children; i++) {
Coroutine *co;
QuorumCo data = {
.acb = acb,
.idx = i,
};
co = qemu_coroutine_create(write_quorum_entry, &data);
qemu_coroutine_enter(co);
}
while (acb->count < s->num_children) {
qemu_coroutine_yield();
}
quorum_has_too_much_io_failed(acb);
ret = acb->vote_ret;
quorum_aio_finalize(acb);
return ret;
}
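/* Write zeroes by issuing a quorum write with BDRV_REQ_ZERO_WRITE set */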
static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
return quorum_co_pwritev(bs, offset, bytes, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
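
/*
 * Return the size of the quorum node. All children must report the
 * same length; a mismatch is reported as -EIO, and any child error is
 * propagated as-is.
 */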
static int64_t quorum_getlength(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
int64_t result;
int i;
/* check that all files have the same length */
result = bdrv_getlength(s->children[0]->bs);
if (result < 0) {
return result;
}
for (i = 1; i < s->num_children; i++) {
int64_t value = bdrv_getlength(s->children[i]->bs);
if (value < 0) {
return value;
}
if (value != result) {
return -EIO;
}
}
return result;
}
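
/*
 * Flush every child. The flush succeeds (returns 0) if at least
 * 'threshold' children flushed successfully; otherwise the error code
 * shared by the largest group of failing children wins the vote and is
 * returned.
 */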
static coroutine_fn int quorum_co_flush(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
QuorumVoteVersion *winner = NULL;
QuorumVotes error_votes;
QuorumVoteValue result_value;
int i;
int result = 0;
int success_count = 0;
QLIST_INIT(&error_votes.vote_list);
error_votes.compare = quorum_64bits_compare;
for (i = 0; i < s->num_children; i++) {
result = bdrv_co_flush(s->children[i]->bs);
if (result) {
quorum_report_bad(QUORUM_OP_TYPE_FLUSH, 0, 0,
s->children[i]->bs->node_name, result);
result_value.l = result;
quorum_count_vote(&error_votes, &result_value, i);
} else {
success_count++;
}
}
if (success_count >= s->threshold) {
result = 0;
} else {
winner = quorum_get_vote_winner(&error_votes);
result = winner->value.l;
}
quorum_free_vote_list(&error_votes);
return result;
}
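
/*
 * Decide whether @to_replace may be replaced by a node that is
 * guaranteed to show the same data as @bs. Only an immediate child
 * with no other parents qualifies; the comments below explain why we
 * cannot recurse any deeper.
 */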
static bool quorum_recurse_can_replace(BlockDriverState *bs,
BlockDriverState *to_replace)
{
BDRVQuorumState *s = bs->opaque;
int i;
for (i = 0; i < s->num_children; i++) {
/*
* We have no idea whether our children show the same data as
* this node (@bs). It is actually highly likely that
* @to_replace does not, because replacing a broken child is
* one of the main use cases here.
*
* We do know that the new BDS will match @bs, so replacing
* any of our children by it will be safe. It cannot change
* the data this quorum node presents to its parents.
*
* However, replacing @to_replace by @bs in any of our
* children's chains may change visible data somewhere in
* there. We therefore cannot recurse down those chains with
* bdrv_recurse_can_replace().
* (More formally, bdrv_recurse_can_replace() requires that
* @to_replace will be replaced by something matching the @bs
* passed to it. We cannot guarantee that.)
*
* Thus, we can only check whether any of our immediate
* children matches @to_replace.
*
* (In the future, we might add a function to recurse down a
* chain that checks that nothing there cares about a change
* in data from the respective child in question. For
* example, most filters do not care when their child's data
* suddenly changes, as long as their parents do not care.)
*/
if (s->children[i]->bs == to_replace) {
/*
* We now have to ensure that there is no other parent
* that cares about replacing this child by a node with
* potentially different data.
* We do so by checking whether there are any other parents
* at all, which is stricter than necessary, but also very
* simple. (We may decide to implement something more
* complex and permissive when there is an actual need for
* it.)
*/
return QLIST_FIRST(&to_replace->parents) == s->children[i] &&
QLIST_NEXT(s->children[i], next_parent) == NULL;
}
}
return false;
}
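
/*
 * Check that 1 <= threshold <= num_children; otherwise set @errp and
 * return -ERANGE.
 */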
static int quorum_valid_threshold(int threshold, int num_children, Error **errp)
{
if (threshold < 1) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"vote-threshold", "a value >= 1");
return -ERANGE;
}
if (threshold > num_children) {
error_setg(errp, "threshold may not exceed children count");
return -ERANGE;
}
return 0;
}
static QemuOptsList quorum_runtime_opts = {
.name = "quorum",
.head = QTAILQ_HEAD_INITIALIZER(quorum_runtime_opts.head),
.desc = {
{
.name = QUORUM_OPT_VOTE_THRESHOLD,
.type = QEMU_OPT_NUMBER,
.help = "The number of vote needed for reaching quorum",
},
{
.name = QUORUM_OPT_BLKVERIFY,
.type = QEMU_OPT_BOOL,
.help = "Trigger block verify mode if set",
},
{
.name = QUORUM_OPT_REWRITE,
.type = QEMU_OPT_BOOL,
.help = "Rewrite corrupted block on read quorum",
},
{
.name = QUORUM_OPT_READ_PATTERN,
.type = QEMU_OPT_STRING,
.help = "Allowed pattern: quorum, fifo. Quorum is default",
},
{ /* end of list */ }
},
};
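
/*
 * A minimal usage sketch for the options above, using the legacy
 * -drive syntax (the filenames below are placeholders, not taken from
 * this file; see the QEMU documentation for the authoritative forms):
 *
 *   -drive driver=quorum,vote-threshold=2,read-pattern=quorum,\
 *          children.0.file.filename=a.qcow2,\
 *          children.1.file.filename=b.qcow2,\
 *          children.2.file.filename=c.qcow2
 */

/*
 * Recompute bs->supported_zero_flags: keep only the zero flags that
 * every child supports, plus BDRV_REQ_WRITE_UNCHANGED, which is always
 * advertised.
 */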
static void quorum_refresh_flags(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
int i;
bs->supported_zero_flags =
BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
for (i = 0; i < s->num_children; i++) {
bs->supported_zero_flags &= s->children[i]->bs->supported_zero_flags;
}
bs->supported_zero_flags |= BDRV_REQ_WRITE_UNCHANGED;
}
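
/*
 * Open the quorum node: count the children.* entries, parse and
 * validate the runtime options (vote threshold, read pattern,
 * blkverify, rewrite-corrupted), then open every child. On failure,
 * any child opened so far is released again.
 */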
static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQuorumState *s = bs->opaque;
QemuOpts *opts = NULL;
const char *pattern_str;
bool *opened;
int i;
int ret = 0;
qdict_flatten(options);
/* count how many different children are present */
s->num_children = qdict_array_entries(options, "children.");
if (s->num_children < 0) {
error_setg(errp, "Option children is not a valid array");
ret = -EINVAL;
goto exit;
}
if (s->num_children < 1) {
error_setg(errp, "Number of provided children must be 1 or more");
ret = -EINVAL;
goto exit;
}
opts = qemu_opts_create(&quorum_runtime_opts, NULL, 0, &error_abort);
if (!qemu_opts_absorb_qdict(opts, options, errp)) {
ret = -EINVAL;
goto exit;
}
s->threshold = qemu_opt_get_number(opts, QUORUM_OPT_VOTE_THRESHOLD, 0);
/* and validate it against s->num_children */
ret = quorum_valid_threshold(s->threshold, s->num_children, errp);
if (ret < 0) {
goto exit;
}
pattern_str = qemu_opt_get(opts, QUORUM_OPT_READ_PATTERN);
if (!pattern_str) {
ret = QUORUM_READ_PATTERN_QUORUM;
} else {
ret = qapi_enum_parse(&QuorumReadPattern_lookup, pattern_str,
-EINVAL, NULL);
}
if (ret < 0) {
error_setg(errp, "Please set read-pattern as fifo or quorum");
goto exit;
}
s->read_pattern = ret;
if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) {
s->is_blkverify = qemu_opt_get_bool(opts, QUORUM_OPT_BLKVERIFY, false);
if (s->is_blkverify && (s->num_children != 2 || s->threshold != 2)) {
error_setg(errp, "blkverify=on can only be set if there are "
"exactly two files and vote-threshold is 2");
ret = -EINVAL;
goto exit;
}
s->rewrite_corrupted = qemu_opt_get_bool(opts, QUORUM_OPT_REWRITE,
false);
if (s->rewrite_corrupted && s->is_blkverify) {
error_setg(errp,
"rewrite-corrupted=on cannot be used with blkverify=on");
ret = -EINVAL;
goto exit;
}
}
/* allocate the children array */
s->children = g_new0(BdrvChild *, s->num_children);
opened = g_new0(bool, s->num_children);
for (i = 0; i < s->num_children; i++) {
char indexstr[INDEXSTR_LEN];
ret = snprintf(indexstr, INDEXSTR_LEN, "children.%d", i);
assert(ret < INDEXSTR_LEN);
s->children[i] = bdrv_open_child(NULL, options, indexstr, bs,
&child_of_bds, BDRV_CHILD_DATA, false,
errp);
if (!s->children[i]) {
ret = -EINVAL;
goto close_exit;
}
opened[i] = true;
}
s->next_child_index = s->num_children;
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
quorum_refresh_flags(bs);
g_free(opened);
goto exit;
close_exit:
/* cleanup on error */
for (i = 0; i < s->num_children; i++) {
if (!opened[i]) {
continue;
}
bdrv_unref_child(bs, s->children[i]);
}
g_free(s->children);
g_free(opened);
exit:
qemu_opts_del(opts);
return ret;
}
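
/* Release every child reference and free the children array. */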
static void quorum_close(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
int i;
for (i = 0; i < s->num_children; i++) {
bdrv_unref_child(bs, s->children[i]);
}
g_free(s->children);
}
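
/*
 * Hot-add @child_bs as a new child named "children.%u", using the next
 * free index. Rejected in blkverify mode and when the index space or
 * the children array would overflow.
 */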
static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
Error **errp)
{
BDRVQuorumState *s = bs->opaque;
BdrvChild *child;
char indexstr[INDEXSTR_LEN];
int ret;
if (s->is_blkverify) {
error_setg(errp, "Cannot add a child to a quorum in blkverify mode");
return;
}
assert(s->num_children <= INT_MAX / sizeof(BdrvChild *));
if (s->num_children == INT_MAX / sizeof(BdrvChild *) ||
s->next_child_index == UINT_MAX) {
error_setg(errp, "Too many children");
return;
}
ret = snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index);
if (ret < 0 || ret >= INDEXSTR_LEN) {
error_setg(errp, "cannot generate child name");
return;
}
s->next_child_index++;
bdrv_drained_begin(bs);
/* We can safely add the child now */
bdrv_ref(child_bs);
child = bdrv_attach_child(bs, child_bs, indexstr, &child_of_bds,
BDRV_CHILD_DATA, errp);
if (child == NULL) {
s->next_child_index--;
goto out;
}
s->children = g_renew(BdrvChild *, s->children, s->num_children + 1);
s->children[s->num_children++] = child;
quorum_refresh_flags(bs);
out:
bdrv_drained_end(bs);
}
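
/*
 * Hot-remove @child. Refused if that would leave fewer children than
 * the vote threshold. If the removed child was the most recently added
 * one, its index is made available again.
 */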
static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
Error **errp)
{
BDRVQuorumState *s = bs->opaque;
char indexstr[INDEXSTR_LEN];
int i;
for (i = 0; i < s->num_children; i++) {
if (s->children[i] == child) {
break;
}
}
/* we have checked it in bdrv_del_child() */
assert(i < s->num_children);
if (s->num_children <= s->threshold) {
error_setg(errp,
"The number of children cannot be lower than the vote threshold %d",
s->threshold);
return;
}
/* We know now that num_children > threshold, so blkverify must be false */
assert(!s->is_blkverify);
snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index - 1);
if (!strncmp(child->name, indexstr, INDEXSTR_LEN)) {
s->next_child_index--;
}
bdrv_drained_begin(bs);
/* We can safely remove this child now */
memmove(&s->children[i], &s->children[i + 1],
(s->num_children - i - 1) * sizeof(BdrvChild *));
s->children = g_renew(BdrvChild *, s->children, --s->num_children);
bdrv_unref_child(bs, child);
quorum_refresh_flags(bs);
bdrv_drained_end(bs);
}
static void quorum_gather_child_options(BlockDriverState *bs, QDict *target,
bool backing_overridden)
{
BDRVQuorumState *s = bs->opaque;
QList *children_list;
int i;
/*
* The generic implementation for gathering child options in
* bdrv_refresh_filename() would use the names of the children
* as specified for bdrv_open_child() or bdrv_attach_child(),
* which is "children.%u" with %u being a value
* (s->next_child_index) that is incremented each time a new child
* is added (and never decremented). Since children can be
* deleted at runtime, there may be gaps in that enumeration.
* When creating a new quorum BDS and specifying the children for
* it through runtime options, the enumeration used there may not
* have any gaps, though.
*
* Therefore, we have to create a new gap-less enumeration here
* (which we can achieve by simply putting all of the children's
* full_open_options into a QList).
*
* XXX: Note that there are issues with the current child option
* structure quorum uses (such as the fact that children do
* not really have unique permanent names). Therefore, this
* is going to have to change in the future and ideally we
* want quorum to be covered by the generic implementation.
*/
children_list = qlist_new();
qdict_put(target, "children", children_list);
for (i = 0; i < s->num_children; i++) {
qlist_append(children_list,
qobject_ref(s->children[i]->bs->full_open_options));
}
}
static char *quorum_dirname(BlockDriverState *bs, Error **errp)
{
/* In general, there are multiple BDSs with different dirnames below this
* one; so there is no unique dirname we could return (unless all are equal
* by chance, or there is only one). Therefore, to be consistent, just
* always return NULL. */
error_setg(errp, "Cannot generate a base directory for quorum nodes");
return NULL;
}
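
/*
 * Compute the permissions for a quorum child: pass the standard
 * permissions through (plus WRITE when rewrite-corrupted is enabled),
 * and never share WRITE or RESIZE, so the children cannot be modified
 * behind quorum's back and diverge from each other.
 */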
static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
BdrvChildRole role,
BlockReopenQueue *reopen_queue,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
BDRVQuorumState *s = bs->opaque;
*nperm = perm & DEFAULT_PERM_PASSTHROUGH;
if (s->rewrite_corrupted) {
*nperm |= BLK_PERM_WRITE;
}
/*
* We cannot share RESIZE or WRITE, as this would make the
* children differ from each other.
*/
*nshared = (shared & (BLK_PERM_CONSISTENT_READ |
BLK_PERM_WRITE_UNCHANGED))
| DEFAULT_PERM_UNCHANGED;
}
/*
* Each one of the children can report different status flags even
* when they contain the same data, so what this function does is
* return BDRV_BLOCK_ZERO if *all* children agree that a certain
* region contains zeroes, and BDRV_BLOCK_DATA otherwise.
*/
static int coroutine_fn quorum_co_block_status(BlockDriverState *bs,
bool want_zero,
int64_t offset, int64_t count,
int64_t *pnum, int64_t *map,
BlockDriverState **file)
{
BDRVQuorumState *s = bs->opaque;
int i, ret;
int64_t pnum_zero = count;
int64_t pnum_data = 0;
for (i = 0; i < s->num_children; i++) {
int64_t bytes;
ret = bdrv_co_common_block_status_above(s->children[i]->bs, NULL, false,
want_zero, offset, count,
&bytes, NULL, NULL, NULL);
if (ret < 0) {
quorum_report_bad(QUORUM_OP_TYPE_READ, offset, count,
s->children[i]->bs->node_name, ret);
pnum_data = count;
break;
}
/*
* Even if all children agree about whether there are zeroes
* or not at @offset they might disagree on the size, so use
* the smallest when reporting BDRV_BLOCK_ZERO and the largest
* when reporting BDRV_BLOCK_DATA.
*/
if (ret & BDRV_BLOCK_ZERO) {
pnum_zero = MIN(pnum_zero, bytes);
} else {
pnum_data = MAX(pnum_data, bytes);
}
}
if (pnum_data) {
*pnum = pnum_data;
return BDRV_BLOCK_DATA;
} else {
*pnum = pnum_zero;
return BDRV_BLOCK_ZERO;
}
}
static const char *const quorum_strong_runtime_opts[] = {
QUORUM_OPT_VOTE_THRESHOLD,
QUORUM_OPT_BLKVERIFY,
QUORUM_OPT_REWRITE,
QUORUM_OPT_READ_PATTERN,
NULL
};
static BlockDriver bdrv_quorum = {
.format_name = "quorum",
.instance_size = sizeof(BDRVQuorumState),
.bdrv_open = quorum_open,
.bdrv_close = quorum_close,
.bdrv_gather_child_options = quorum_gather_child_options,
.bdrv_dirname = quorum_dirname,
.bdrv_co_block_status = quorum_co_block_status,
.bdrv_co_flush = quorum_co_flush,
.bdrv_getlength = quorum_getlength,
.bdrv_co_preadv = quorum_co_preadv,
.bdrv_co_pwritev = quorum_co_pwritev,
.bdrv_co_pwrite_zeroes = quorum_co_pwrite_zeroes,
.bdrv_add_child = quorum_add_child,
.bdrv_del_child = quorum_del_child,
.bdrv_child_perm = quorum_child_perm,
.bdrv_recurse_can_replace = quorum_recurse_can_replace,
.strong_runtime_opts = quorum_strong_runtime_opts,
};
static void bdrv_quorum_init(void)
{
if (!qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA256)) {
/* SHA256 hash support is required for quorum device */
return;
}
bdrv_register(&bdrv_quorum);
}
block_init(bdrv_quorum_init);