#!/usr/bin/env python
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests


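# Each pattern is a (byte, offset, length) tuple expanded into a qemu-io
# write command; e.g. ('0x41', 0, 512) becomes "write -P0x41 0 512".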
def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }
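

# transaction_action() converts keyword arguments into the QMP wire format,
# replacing underscores with hyphens along the way; for example,
#   transaction_action('block-dirty-bitmap-clear', node='drive0',
#                      name='bitmap0')
# returns:
#   {'type': 'block-dirty-bitmap-clear',
#    'data': {'node': 'drive0', 'name': 'bitmap0'}}
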
def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', device=device, target=target,
                              **kwargs)


class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)
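
    # Image names produced by new_target(), e.g. for drive 'drive0',
    # bitmap 'bitmap0', format 'qcow2', backup number 0:
    #   <test_dir>/drive0.bitmap0.inc0.qcow2  (incremental backup target)
    #   <test_dir>/drive0.bitmap0.ref0.qcow2  (full reference backup)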

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir,
                                    'err.%s' % iotests.imgfmt)

    def setUp(self):
        # Create a base image with a distinctive pattern
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))

    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]

    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)

    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False

    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']

    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'],
                                 target=reference)
        self.assertTrue(res)

    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap
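
    # Any extra keyword arguments (e.g. granularity=32768) are forwarded
    # verbatim to the block-dirty-bitmap-add command; the granularity tests
    # below rely on this pass-through.
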
    def prepare_backup(self, bitmap=None, parent=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target

    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res
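
    # Validation scheme (see check_backups below): each incremental image,
    # read through its backing chain, must match the full "reference" backup
    # taken at the same point in time, and the newest backup must also match
    # the live drive contents.
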
    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental,
                                                       reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last,
                                                   bitmap.drive['file']))

    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')

    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'],
                           (('0x9a', 0, 512),
                            ('0x55', '8M', '352k'),
                            ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()

    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)

    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)

    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        target with a larger cluster size of 128KiB, and without a backing
        file, work correctly.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create a bitmap and dirty it with some new writes.
        # Overwriting [32736k, 32800k) dirties the bitmap clusters at
        # 32M-64K and 32M; the cluster at 32M+64K is left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'],
                           (('0x9a', 0, 512),
                            ('0x55', '8M', '352k'),
                            ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to
        each drive. Use blkdebug to interfere with the backup on just one
        drive and attempt to create a coherent incremental backup across
        both drives.

        Verify a failure in one but not both, then delete the failed stubs
        and re-run the same transaction.

        Verify that both incrementals are then created successfully.
        '''

        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
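        # The rules below arm a one-shot failure: the first flush_to_disk
        # event advances the blkdebug state machine from state 1 to state 2,
        # and in state 2 the next read_aio is failed once with errno 5 (EIO).
        # hmp_io_writes() ends with a flush, so the injected error fires
        # during the reads issued by the incremental backup job.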
        result = self.vm.qmp('blockdev-add', options={
            'id': drive1['id'],
            'driver': drive1['fmt'],
            'file': {
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive1['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'],
                           (('0xba', 0, 512),
                            ('0xef', '16M', '256k'),
                            ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive,
        # expecting drive1's backup to fail:
        transaction = [
            transaction_drive_backup(drive0['id'], target0,
                                     sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1,
                                     sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]

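        # completion-mode=grouped makes the transaction's jobs complete or
        # fail as a unit: when one backup job fails, the other is cancelled
        # instead of being allowed to finish on its own.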
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes
        # with an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target. Then re-run the same transaction.
        dr0bm0.del_target()
        dr1bm0.del_target()
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental',
                             format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Verify that an improper granularity is rejected.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
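        # Unlike the base class's setUp(), the image is not attached with
        # vm.add_drive() here; test_incremental_failure attaches it via
        # blockdev-add so a blkdebug filter can sit beneath the format
        # driver.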
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add', options={
            'id': drive0['id'],
            'driver': drive0['fmt'],
            'file': {
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive0['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, the VM would
        # resume and begin issuing IO requests here.
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'],
                           (('0x9a', 0, 512),
                            ('0x55', '8M', '352k'),
                            ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'])