d44dae1a7c
It's possible for requests to start waiting on each other in
mirror_wait_on_conflicts(). To avoid this, use the same technique as in
block/io.c in bdrv_wait_serialising_requests_locked() /
bdrv_find_conflicting_request(): don't wait on an intersecting request
if it is itself already waiting for some other request.

For details of the deadlock, look at the testIntersectingActiveIO()
test case, which this patch actually fixes.
Fixes: d06107ade0
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210702211636.228981-4-vsementsov@virtuozzo.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
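
To illustrate the idea, here is a minimal Python sketch of the conflict
search (all names and types here are invented for illustration; the real
logic is C code in block/io.c and block/mirror.c):

    class Request:
        def __init__(self, offset, nbytes):
            self.offset = offset
            self.nbytes = nbytes
            self.waiting_for = None  # request we are currently blocked on

        def intersects(self, other):
            # Half-open byte ranges [offset, offset + nbytes) overlap
            return (self.offset < other.offset + other.nbytes and
                    other.offset < self.offset + self.nbytes)

    def find_conflicting_request(self_req, in_flight):
        """Return an intersecting request worth waiting on, or None.

        Requests that are themselves waiting are skipped: waiting on a
        waiter is what allowed two requests to wait on each other.
        """
        for req in in_flight:
            if req is self_req or not req.intersects(self_req):
                continue
            if req.waiting_for is None:
                return req
            # req is blocked on some other request; don't wait on it,
            # keep scanning instead.
        return None

Because a waiting request is never chosen as someone else's wait target,
no cycle of waiters can form.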
#!/usr/bin/env python3
# group: rw
#
# Tests for active mirroring
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests
from iotests import qemu_img

source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt)
target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt)

class TestActiveMirror(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # MB
    potential_writes_in_flight = True

    def setUp(self):
        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        blk_source = {'id': 'source',
                      'if': 'none',
                      'node-name': 'source-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'blkdebug',
                               'image': {'driver': 'file',
                                         'filename': source_img}}}

        blk_target = {'node-name': 'target-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'file',
                               'filename': target_img}}

        self.vm = iotests.VM()
        self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source))
        self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target))
        self.vm.add_device('virtio-blk,drive=source')
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

        if not self.potential_writes_in_flight:
            self.assertTrue(iotests.compare_images(source_img, target_img),
                            'mirror target does not match source')

        os.remove(source_img)
        os.remove(target_img)

    def doActiveIO(self, sync_source_and_target):
        # Fill the source image
        self.vm.hmp_qemu_io('source',
                            'write -P 1 0 %i' % self.image_len)

        # Start some background requests
        for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
        for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

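        # copy_mode='write-blocking' below selects active mirroring:
        # guest writes are synchronously written to the target as well,
        # which guarantees that the job converges.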
        # Start the block job
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking')
        self.assert_qmp(result, 'return', {})

        # Start some more requests
        for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Wait for the READY event
        self.wait_ready(drive='mirror')

        # Now start some final requests; all of these (which land on
        # the source) should be settled using the active mechanism.
        # The mirror code itself asserts that the source BDS's dirty
        # bitmap will stay clean between READY and COMPLETED.
        for offset in range(5 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(6 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        if sync_source_and_target:
            # If source and target should be in sync after the mirror,
            # we have to flush before completion
            self.vm.hmp_qemu_io('source', 'aio_flush')
            self.potential_writes_in_flight = False

        self.complete_and_wait(drive='mirror', wait_ready=False)

    def testActiveIO(self):
        self.doActiveIO(False)

    def testActiveIOFlushed(self):
        self.doActiveIO(True)

    def testUnalignedActiveIO(self):
        # Fill the source image
        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking',
                             buf_size=(1048576 // 4),
                             speed=1)
        self.assert_qmp(result, 'return', {})

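        # The request below is unaligned and targets an area that is
        # still dirty, so it exercises the active-mirror handling of
        # unaligned writes.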
        # Start an unaligned request to a dirty area
        result = self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))

        # Let the job finish
        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
        self.assert_qmp(result, 'return', {})
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False

    def testIntersectingActiveIO(self):
        # Fill the source image
        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        result = self.vm.qmp('blockdev-mirror',
                             job_id='mirror',
                             filter_node_name='mirror-node',
                             device='source-node',
                             target='target-node',
                             sync='full',
                             copy_mode='write-blocking',
                             speed=1)

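        # qemu-io breakpoints: 'break write_aio A' suspends the next
        # request that triggers the blkdebug write_aio event and tags it
        # A; 'wait_break A' waits until a request is actually suspended
        # there; 'resume A' lets it continue.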
        self.vm.hmp_qemu_io('source', 'break write_aio A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 1M')  # 1
        self.vm.hmp_qemu_io('source', 'wait_break A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 2
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 3

        # Now 2 and 3 are in mirror_wait_on_conflicts, waiting for 1

        self.vm.hmp_qemu_io('source', 'break write_aio B')
        self.vm.hmp_qemu_io('source', 'aio_write 1M 2M')  # 4
        self.vm.hmp_qemu_io('source', 'wait_break B')

        # 4 doesn't wait for 2 and 3, because they haven't set
        # in_flight_bitmap yet.  So nothing prevents 4 from proceeding
        # except for our break-point B.

        self.vm.hmp_qemu_io('source', 'resume A')

        # Now that 1 is resumed, 2 and 3 go to the next iteration of the
        # while loop in mirror_wait_on_conflicts().  They don't exit, as
        # the bitmap is still dirty due to request 4.
        # In the past, at this point 2 and 3 would wait for each other,
        # producing a deadlock.  Now this is fixed and they wait for
        # request 4 instead.

        self.vm.hmp_qemu_io('source', 'resume B')

        # After resuming 4, one of 2 and 3 goes first and sets
        # in_flight_bitmap, so the other will wait for it.

        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
        self.assert_qmp(result, 'return', {})
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'raw'],
                 supported_protocols=['file'])