commit 9a3a9a636e

In Python 3, / is always floating-point division. We usually do not want
this, and since Python 2.7 understands // as well, change all integer
divisions to use that.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Cleber Rosa <crosa@redhat.com>
Message-Id: <20181022135307.14398-5-mreitz@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
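As a quick illustration of the behavior described above (a plain interpreter session, not taken from the patch itself): in Python 3, / always performs true division and returns a float, while // performs floor division; Python 2.7 accepts // with the same meaning, so the test below works under both versions.

    >>> 7 / 2     # Python 3: true division, always a float
    3.5
    >>> 7 // 2    # Python 2.7 and 3: floor division
    3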
852 lines · 35 KiB · Python · Executable File
#!/usr/bin/env python
#
# Tests for image streaming.
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import time
import os
import iotests
from iotests import qemu_img, qemu_io

backing_img = os.path.join(iotests.test_dir, 'backing.img')
mid_img = os.path.join(iotests.test_dir, 'mid.img')
test_img = os.path.join(iotests.test_dir, 'test.img')

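# TestSingleDrive streams through the chain backing.img <- mid.img <- test.img;
# the drive is attached through blkdebug so the tests can pause and resume I/O
# on demand (see test_stream_pause).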
class TestSingleDrive(iotests.QMPTestCase):
    image_len = 1 * 1024 * 1024 # MB

    def setUp(self):
        iotests.create_image(backing_img, TestSingleDrive.image_len)
        qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, mid_img)
        qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % mid_img, test_img)
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img, "backing.node-name=mid")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(mid_img)
        os.remove(backing_img)

    def test_stream(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_stream_intermediate(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img),
                            'image file map matches backing file before streaming')

        result = self.vm.qmp('block-stream', device='mid', job_id='stream-mid')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-mid')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img),
                         'image file map does not match backing file after streaming')

    def test_stream_pause(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.pause_job('drive0', wait=False)
        self.vm.resume_drive('drive0')
        self.pause_wait('drive0')

        result = self.vm.qmp('query-block-jobs')
        offset = self.dictpath(result, 'return[0]/offset')

        time.sleep(0.5)
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/offset', offset)

        result = self.vm.qmp('block-job-resume', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_stream_no_op(self):
        self.assert_no_active_block_jobs()

        # The image map is empty before the operation
        empty_map = qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', test_img)

        # This is a no-op: no data should ever be copied from the base image
        result = self.vm.qmp('block-stream', device='drive0', base=mid_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         empty_map, 'image file map changed after a no-op')

    def test_stream_partial(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', base=backing_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_device_not_found(self):
        result = self.vm.qmp('block-stream', device='nonexistent')
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_job_id_missing(self):
        result = self.vm.qmp('block-stream', device='mid')
        self.assert_qmp(result, 'error/class', 'GenericError')


class TestParallelOps(iotests.QMPTestCase):
    num_ops = 4 # Number of parallel block-stream operations
    num_imgs = num_ops * 2 + 1
    image_len = num_ops * 512 * 1024
    imgs = []

    def setUp(self):
        opts = []
        self.imgs = []

        # Initialize file names and command-line options
        for i in range(self.num_imgs):
            img_depth = self.num_imgs - i - 1
            opts.append("backing." * img_depth + "node-name=node%d" % i)
            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))

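        # node-name 'node%d' refers to self.imgs[i]: node0 is the base of the
        # chain and the highest-numbered node is the active layer that gets
        # attached as drive0 below.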
        # Create all images
        iotests.create_image(self.imgs[0], self.image_len)
        for i in range(1, self.num_imgs):
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % self.imgs[i-1], self.imgs[i])

        # Put data into the images we are copying data from
        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
        for i in range(len(odd_img_indexes)):
            # Alternate between 256KB and 512KB.
            # This way jobs will not finish in the same order they were created
            num_kb = 256 + 256 * (i % 2)
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0xFF %dk %dk' % (i * 512, num_kb),
                    self.imgs[odd_img_indexes[i]])

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(self.imgs[-1], ','.join(opts))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in self.imgs:
            os.remove(img)

    # Test that it's possible to run several block-stream operations
    # in parallel in the same snapshot chain
    def test_stream_parallel(self):
        self.assert_no_active_block_jobs()

        # Check that the maps don't match before the streaming operations
        for i in range(2, self.num_imgs, 2):
            self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]),
                                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]),
                                'image file map matches backing file before streaming')

        # Create all streaming jobs
        pending_jobs = []
        for i in range(2, self.num_imgs, 2):
            node_name = 'node%d' % i
            job_id = 'stream-%s' % node_name
            pending_jobs.append(job_id)
            result = self.vm.qmp('block-stream', device=node_name, job_id=job_id, base=self.imgs[i-2], speed=512*1024)
            self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    job_id = self.dictpath(event, 'data/device')
                    self.assertTrue(job_id in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(job_id)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # Check that all maps match now
        for i in range(2, self.num_imgs, 2):
            self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]),
                             qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]),
                             'image file map does not match backing file after streaming')

    # Test that it's not possible to perform two block-stream
    # operations if there are nodes involved in both.
    def test_overlapping_1(self):
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4', base=self.imgs[1], speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
        self.assert_qmp(result, 'error/class', 'GenericError')

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
        self.assert_qmp(result, 'error/class', 'GenericError')

        result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # block-commit should also fail if it touches nodes used by the stream job
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
        self.assert_qmp(result, 'error/class', 'GenericError')

        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # This fails because it needs to modify the backing string in node2, which is blocked
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
        self.assert_qmp(result, 'error/class', 'GenericError')

        self.wait_until_completed(drive='stream-node4')
        self.assert_no_active_block_jobs()

    # Similar to test_overlapping_1, but with block-commit
    # blocking the other jobs
    def test_overlapping_2(self):
        self.assertLessEqual(9, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3')
        self.assert_qmp(result, 'error/class', 'GenericError')

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6')
        self.assert_qmp(result, 'error/class', 'GenericError')

        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4')
        self.assert_qmp(result, 'error/class', 'GenericError')

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # This fails because block-commit currently blocks the active layer even if it's not used
        result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
        self.assert_qmp(result, 'error/class', 'GenericError')

        self.wait_until_completed(drive='commit-node3')

    # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
    # Internally this uses a mirror block job, hence the separate test case.
    def test_overlapping_3(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
        self.assert_qmp(result, 'error/class', 'GenericError')

        event = self.vm.event_wait(name='BLOCK_JOB_READY')
        self.assert_qmp(event, 'data/device', 'commit-drive0')
        self.assert_qmp(event, 'data/type', 'commit')
        self.assert_qmp_absent(event, 'data/error')

        result = self.vm.qmp('block-job-complete', device='commit-drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='commit-drive0')

    # Test a block-stream and a block-commit job in parallel
    # Here the stream job is supposed to finish quickly in order to reproduce
    # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
    def test_stream_commit_1(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node2
        result = self.vm.qmp('block-stream', device='node2', base_node='node0', job_id='node2')
        self.assert_qmp(result, 'return', {})

        # Commit from the active layer into node3
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3])
        self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        pending_jobs = ['node2', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # This is similar to test_stream_commit_1 but both jobs are slowed
    # down so they can run in parallel for a little while.
    def test_stream_commit_2(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node4
        result = self.vm.qmp('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Commit from the active layer into node5
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
        self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        pending_jobs = ['node4', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # Test the base_node parameter
    def test_stream_base_node_name(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]),
                            'image file map matches backing file before streaming')

        # Error: the base node does not exist
        result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # Error: the base node is not a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # Error: the base node is the same as the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # Error: cannot specify 'base' and 'base-node' at the same time
        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream')
        self.assert_qmp(result, 'error/class', 'GenericError')

        # Success: the base node is a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='stream')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]),
                         'image file map does not match backing file after streaming')

class TestQuorum(iotests.QMPTestCase):
    num_children = 3
    children = []
    backing = []

    def setUp(self):
        opts = ['driver=quorum', 'vote-threshold=2']

        # Initialize file names and command-line options
        for i in range(self.num_children):
            child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i)
            backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i)
            self.children.append(child_img)
            self.backing.append(backing_img)
            qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M')
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0x55 0 1024', backing_img)
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % backing_img, child_img)
            opts.append("children.%d.file.filename=%s" % (i, child_img))
            opts.append("children.%d.node-name=node%d" % (i, i))

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(path=None, opts=','.join(opts))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in self.children:
            os.remove(img)
        for img in self.backing:
            os.remove(img)

    def test_stream_quorum(self):
        if not iotests.supports_quorum():
            return

        self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]),
                            'image file map matches backing file before streaming')

        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='node0', job_id='stream-node0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-node0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]),
                         'image file map does not match backing file after streaming')

class TestSmallerBackingFile(iotests.QMPTestCase):
    backing_len = 1 * 1024 * 1024 # MB
    image_len = 2 * backing_len

    def setUp(self):
        iotests.create_image(backing_img, self.backing_len)
        qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, test_img, str(self.image_len))
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    # If this hangs, then you are missing a fix to complete streaming when the
    # end of the backing file is reached.
    def test_stream(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestErrors(iotests.QMPTestCase):
    image_len = 2 * 1024 * 1024 # MB

    # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
    STREAM_BUFFER_SIZE = 512 * 1024

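    # Write a blkdebug configuration file: the [inject-error] section fails a
    # read of sector STREAM_BUFFER_SIZE // 512 (bytes converted to 512-byte
    # sectors with integer division) with the given errno while blkdebug is in
    # state 1, and the two [set-state] sections toggle between states 1 and 2
    # on each occurrence of the event.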
    def create_blkdebug_file(self, name, event, errno):
        file = open(name, 'w')
        file.write('''
[inject-error]
state = "1"
event = "%s"
errno = "%d"
immediately = "off"
once = "on"
sector = "%d"

[set-state]
state = "1"
event = "%s"
new_state = "2"

[set-state]
state = "2"
event = "%s"
new_state = "1"
''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
        file.close()

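# TestEIO and TestENOSPC (below) call create_blkdebug_file() with errno 5
# (EIO) and errno 28 (ENOSPC) respectively, so reads from the backing file
# during streaming can fail with the corresponding error.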
class TestEIO(TestErrors):
    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_report(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_ignore(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='ignore')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Job finished too quickly
                        continue
                    self.assert_qmp(result, 'return[0]/paused', False)
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_stop(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='stop')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/paused', True)
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assert_qmp(result, 'return[0]/paused', False)
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestENOSPC(TestErrors):
    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/paused', True)
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assert_qmp(result, 'return[0]/paused', False)
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestStreamStop(iotests.QMPTestCase):
    image_len = 8 * 1024 * 1024 * 1024 # GB

    def setUp(self):
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    def test_stream_stop(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        time.sleep(0.1)
        events = self.vm.get_qmp_events(wait=False)
        for e in events:
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)

class TestSetSpeed(iotests.QMPTestCase):
    image_len = 80 * 1024 * 1024 # MB

    def setUp(self):
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt, '-o', 'backing_file=%s' % backing_img, test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    # This is a short performance test which is not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        # Default speed is 0
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 0)

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        # Ensure the speed we set was accepted
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Check setting speed in block-stream works
        result = self.vm.qmp('block-stream', device='drive0', speed=4 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/class', 'GenericError')

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/class', 'GenericError')

        self.cancel_and_wait(resume=True)

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'])