#!/usr/bin/env python3
# group: rw backing
#
# Tests for drive-backup
#
# Copyright (C) 2013 Red Hat, Inc.
#
# Based on 041.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import time
import os
import iotests
from iotests import qemu_img, qemu_io, create_image

backing_img = os.path.join(iotests.test_dir, 'backing.img')
test_img = os.path.join(iotests.test_dir, 'test.img')
target_img = os.path.join(iotests.test_dir, 'target.img')

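# Create a named test image in iotests.test_dir and return its full
# path; extra keyword arguments are passed to qemu-img create as
# '-o key=value' options.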
def img_create(img, fmt=iotests.imgfmt, size='64M', **kwargs):
    fullname = os.path.join(iotests.test_dir, '%s.%s' % (img, fmt))
    optargs = []
    for k, v in kwargs.items():
        optargs = optargs + ['-o', '%s=%s' % (k, v)]
    args = ['create', '-f', fmt] + optargs + [fullname, size]
    iotests.qemu_img(*args)
    return fullname

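# Remove an image, ignoring the error if it was never created.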
def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass

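# Write a sequence of (pattern, offset, length) tuples into img with
# qemu-io.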
def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)

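# Tests for sync=top (only copy data allocated in the top image) and
# sync=none (only copy-before-write: the target receives the data the
# source held when the job started).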
class TestSyncModesNoneAndTop(iotests.QMPTestCase):
    image_len = 64 * 1024 * 1024  # 64 MiB

    def setUp(self):
        create_image(backing_img, TestSyncModesNoneAndTop.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img, '-F', 'raw', test_img)
        qemu_io('-c', 'write -P0x41 0 512', test_img)
        qemu_io('-c', 'write -P0xd5 1M 32k', test_img)
        qemu_io('-c', 'write -P0xdc 32M 124k', test_img)
        qemu_io('-c', 'write -P0xdc 67043328 64k', test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        try:
            os.remove(target_img)
        except OSError:
            pass

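    # A completed sync=top backup must read back identically to the
    # source image.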
    def test_complete_top(self):
        self.assert_no_active_block_jobs()
        result = self.vm.qmp('drive-backup', device='drive0', sync='top',
                             format=iotests.imgfmt, target=target_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(check_offset=False)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()
        self.assertTrue(iotests.compare_images(test_img, target_img),
                        'target image does not match source after backup')

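    # With sync=none, a guest write issued while the job runs must first
    # copy the old data to the target, so even a cancelled job leaves
    # the original contents there.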
    def test_cancel_sync_none(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('drive-backup', device='drive0',
                             sync='none', target=target_img)
        self.assert_qmp(result, 'return', {})
        time.sleep(1)
        self.vm.hmp_qemu_io('drive0', 'write -P0x5e 0 512')
        self.vm.hmp_qemu_io('drive0', 'aio_flush')

        event = self.cancel_and_wait()
        self.assert_qmp(event, 'data/type', 'backup')

        self.vm.shutdown()
        time.sleep(1)
        # Verify that the original contents exist in the target image.
        qemu_io('-c', 'read -P0x41 0 512', target_img)

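# A guest write to a not-yet-copied area must trigger a copy-out to the
# target before the write itself lands (the "before write notifier").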
class TestBeforeWriteNotifier(iotests.QMPTestCase):
    def setUp(self):
        self.vm = iotests.VM().add_drive_raw("file=blkdebug::null-co://,id=drive0,align=65536,driver=blkdebug")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(target_img)

    def test_before_write_notifier(self):
        self.vm.pause_drive("drive0")
        result = self.vm.qmp('drive-backup', device='drive0',
                             sync='full', target=target_img,
                             format="file", speed=1)
        self.assert_qmp(result, 'return', {})
        result = self.vm.qmp('block-job-pause', device="drive0")
        self.assert_qmp(result, 'return', {})
        # Speed is low enough that this must be an uncopied range, which will
        # trigger the before write notifier
        self.vm.hmp_qemu_io('drive0', 'aio_write -P 1 512512 512')
        self.vm.resume_drive("drive0")
        result = self.vm.qmp('block-job-resume', device="drive0")
        self.assert_qmp(result, 'return', {})
        event = self.cancel_and_wait()
        self.assert_qmp(event, 'data/type', 'backup')

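# Job lifecycle tests: manual vs. automatic dismissal, job ID
# collisions, and dismissing a job that is paused on an injected I/O
# error.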
class BackupTest(iotests.QMPTestCase):
    def setUp(self):
        self.vm = iotests.VM()
        self.test_img = img_create('test')
        self.dest_img = img_create('dest')
        self.dest_img2 = img_create('dest2')
        self.ref_img = img_create('ref')
        self.vm.add_drive(self.test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        try_remove(self.test_img)
        try_remove(self.dest_img)
        try_remove(self.dest_img2)
        try_remove(self.ref_img)

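    # Like io_write_patterns(), but going through the guest device so a
    # running backup job sees the writes; flushes afterwards.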
    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')

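    # Convenience wrappers around drive-backup/blockdev-backup: serror
    # is the error expected synchronously from the command, aerror the
    # error expected asynchronously in the completion event.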
    def qmp_backup_and_wait(self, cmd='drive-backup', serror=None,
                            aerror=None, **kwargs):
        if not self.qmp_backup(cmd, serror, **kwargs):
            return False
        return self.qmp_backup_wait(kwargs['device'], aerror)

    def qmp_backup(self, cmd='drive-backup',
                   error=None, **kwargs):
        self.assertTrue('device' in kwargs)
        res = self.vm.qmp(cmd, **kwargs)
        if error:
            self.assert_qmp(res, 'error/desc', error)
            return False
        self.assert_qmp(res, 'return', {})
        return True

    def qmp_backup_wait(self, device, error=None):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Failure.
            self.assert_qmp(event, 'data/error', error)
            return False

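    # While a deliberately slow backup runs, overwrite areas the job has
    # already copied out; the final target must still match a reference
    # backup of the original data.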
    def test_overlapping_writes(self):
        # Write something to back up
        self.hmp_io_writes('drive0', [('42', '0M', '2M')])

        # Create a reference backup
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.ref_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})

        # Now to the test backup: We simulate the following guest
        # writes:
        # (1) [1M + 64k, 1M + 128k): Afterwards, everything in that
        #     area should be in the target image, and we must not copy
        #     it again (because the source image has changed now)
        #     (64k is the job's cluster size)
        # (2) [1M, 2M): The backup job must not get overeager.  It
        #     must copy [1M, 1M + 64k) and [1M + 128k, 2M) separately,
        #     but not the area in between.

        self.qmp_backup(device='drive0', format=iotests.imgfmt, sync='full',
                        target=self.dest_img, speed=1, auto_dismiss=False)

        self.hmp_io_writes('drive0', [('23', '%ik' % (1024 + 64), '64k'),
                                      ('66', '1M', '1M')])

        # Let the job complete
        res = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
        self.assert_qmp(res, 'return', {})
        self.qmp_backup_wait('drive0')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})

        self.assertTrue(iotests.compare_images(self.ref_img, self.dest_img),
                        'target image does not match reference image')

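    # With auto-dismiss=True the job vanishes from query-block-jobs by
    # itself once it completes.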
    def test_dismiss_false(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img,
                                 auto_dismiss=True)
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])

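    # With auto-dismiss=False the job stays in 'concluded' state until
    # an explicit block-job-dismiss removes it.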
    def test_dismiss_true(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/status', 'concluded')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])

    def test_dismiss_bad_id(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        res = self.vm.qmp('block-job-dismiss', id='foobar')
        self.assert_qmp(res, 'error/class', 'DeviceNotActive')

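    # An un-dismissed job still occupies its (implicit) job ID 'drive0',
    # so starting a second backup must fail until the first one is
    # dismissed.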
    def test_dismiss_collision(self):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img,
                                 auto_dismiss=False)
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/status', 'concluded')
        # Leave zombie job un-dismissed, observe a failure:
        res = self.qmp_backup_and_wait(serror="Job ID 'drive0' already in use",
                                       device='drive0', format=iotests.imgfmt,
                                       sync='full', target=self.dest_img2,
                                       auto_dismiss=False)
        self.assertEqual(res, False)
        # OK, dismiss the zombie.
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'return', {})
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        # Ensure it's really gone.
        self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
                                 sync='full', target=self.dest_img2,
                                 auto_dismiss=False)

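    # Common helper: inject a single write error on the target via
    # blkdebug with on-target-error=stop, verify that the paused job
    # rejects 'dismiss', then resume it and let it conclude.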
    def dismissal_failure(self, dismissal_opt):
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return', [])
        # Give blkdebug something to chew on
        self.hmp_io_writes('drive0',
                           (('0x9a', 0, 512),
                            ('0x55', '8M', '352k'),
                            ('0x78', '15872k', '1M')))
        # Add destination node via blkdebug
        res = self.vm.qmp('blockdev-add',
                          node_name='target0',
                          driver=iotests.imgfmt,
                          file={
                              'driver': 'blkdebug',
                              'image': {
                                  'driver': 'file',
                                  'filename': self.dest_img
                              },
                              'inject-error': [{
                                  'event': 'write_aio',
                                  'errno': 5,
                                  'immediately': False,
                                  'once': True
                              }],
                          })
        self.assert_qmp(res, 'return', {})

        res = self.qmp_backup(cmd='blockdev-backup',
                              device='drive0', target='target0',
                              on_target_error='stop',
                              sync='full',
                              auto_dismiss=dismissal_opt)
        self.assertTrue(res)
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={'data': {'device': 'drive0'}})
        self.assertNotEqual(event, None)
        # OK, job should pause, but it can't do it immediately, as it can't
        # cancel other parallel requests (which didn't fail)
        with iotests.Timeout(60, "Timeout waiting for backup actually paused"):
            while True:
                res = self.vm.qmp('query-block-jobs')
                if res['return'][0]['status'] == 'paused':
                    break
        self.assert_qmp(res, 'return[0]/status', 'paused')
        res = self.vm.qmp('block-job-dismiss', id='drive0')
        self.assert_qmp(res, 'error/desc',
                        "Job 'drive0' in state 'paused' cannot accept"
                        " command verb 'dismiss'")
        res = self.vm.qmp('query-block-jobs')
        self.assert_qmp(res, 'return[0]/status', 'paused')
        # OK, unstick job and move forward.
        res = self.vm.qmp('block-job-resume', device='drive0')
        self.assert_qmp(res, 'return', {})
        # And now we need to wait for it to conclude.
        res = self.qmp_backup_wait(device='drive0')
        self.assertTrue(res)
        if not dismissal_opt:
            # Job should now be languishing:
            res = self.vm.qmp('query-block-jobs')
            self.assert_qmp(res, 'return[0]/status', 'concluded')
            res = self.vm.qmp('block-job-dismiss', id='drive0')
            self.assert_qmp(res, 'return', {})
            res = self.vm.qmp('query-block-jobs')
            self.assert_qmp(res, 'return', [])

    def test_dismiss_premature(self):
        self.dismissal_failure(False)

    def test_dismiss_erroneous(self):
        self.dismissal_failure(True)

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])