#!/usr/bin/env python
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
#
# Check using the job-* QMP commands with block jobs

import iotests

iotests.verify_image_format(supported_fmts=['qcow2'])

img_size = 4 * 1024 * 1024

# Poll query-jobs (with a 3s timeout) until the job with the given ID
# reports the 'paused' or 'standby' status, then return its job entry
def pause_wait(vm, job_id):
    with iotests.Timeout(3, "Timeout waiting for job to pause"):
        while True:
            result = vm.qmp('query-jobs')
            for job in result['return']:
                if job['id'] == job_id and job['status'] in ['paused', 'standby']:
                    return job
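
# A job entry returned by query-jobs (and thus by pause_wait()) looks
# roughly like this; the type and progress values are illustrative:
#   {'id': 'job0', 'type': 'mirror', 'status': 'standby',
#    'current-progress': 65536, 'total-progress': 4194304}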

# Test that block-job-pause/resume and job-pause/resume can be mixed
def test_pause_resume(vm):
    for pause_cmd, pause_arg in [('block-job-pause', 'device'),
                                 ('job-pause', 'id')]:
        for resume_cmd, resume_arg in [('block-job-resume', 'device'),
                                       ('job-resume', 'id')]:
            iotests.log('=== Testing %s/%s ===' % (pause_cmd, resume_cmd))

            iotests.log(vm.qmp(pause_cmd, **{pause_arg: 'job0'}))
            pause_wait(vm, 'job0')
            iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
            result = vm.qmp('query-jobs')
            iotests.log(result)

            old_progress = result['return'][0]['current-progress']
            total_progress = result['return'][0]['total-progress']

            iotests.log(vm.qmp(resume_cmd, **{resume_arg: 'job0'}))
            iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
            if old_progress < total_progress:
                # Wait for the job to advance
                while result['return'][0]['current-progress'] == old_progress:
                    result = vm.qmp('query-jobs')
                iotests.log(result)
            else:
                # Already reached the end, so the job cannot advance
                # any further; therefore, the query-jobs result can be
                # logged immediately
                iotests.log(vm.qmp('query-jobs'))
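
# On the QMP wire, the pause/resume pairs exercised above look roughly like
# this (replies illustrative):
#   -> {'execute': 'job-pause', 'arguments': {'id': 'job0'}}
#   <- {'return': {}}
#   -> {'execute': 'job-resume', 'arguments': {'id': 'job0'}}
#   <- {'return': {}}
# The block-job-pause/resume variants take a 'device' argument instead of 'id'.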
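
# Overview of the job lifecycle exercised by test_job_lifecycle(), using the
# state names that appear in the comments and QMP events below:
#
#   undefined -> created -> running -> (ready ->) waiting -> pending
#     -> concluded -> null
#
# With auto-finalize=False the job stops in PENDING until job-finalize is
# issued; with auto-dismiss=False it stops in CONCLUDED until job-dismiss
# is issued.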
def test_job_lifecycle(vm, job, job_args, has_ready=False):
    global img_size

    iotests.log('')
    iotests.log('')
    iotests.log('Starting block job: %s (auto-finalize: %s; auto-dismiss: %s)' %
                (job,
                 job_args.get('auto-finalize', True),
                 job_args.get('auto-dismiss', True)))
    iotests.log(vm.qmp(job, job_id='job0', **job_args))

    # Depending on the storage, the first request may or may not have completed
    # yet (and the total progress may not have been fully determined yet), so
    # filter out the progress. Later query-jobs calls don't need the filtering
    # because the progress is made deterministic by the block job speed
    result = vm.qmp('query-jobs')
    for j in result['return']:
        j['current-progress'] = 'FILTERED'
        j['total-progress'] = 'FILTERED'
    iotests.log(result)

    # undefined -> created -> running
    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
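
    # After filter_qmp_event(), each logged event looks roughly like this
    # (the status field varies):
    #   {'data': {'id': 'job0', 'status': 'created'},
    #    'event': 'JOB_STATUS_CHANGE',
    #    'timestamp': {'microseconds': 'USECS', 'seconds': 'SECS'}}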

    # Wait for total-progress to stabilize
    while vm.qmp('query-jobs')['return'][0]['total-progress'] < img_size:
        pass

    # RUNNING state:
    # pause/resume should work, complete/finalize/dismiss should error out
    iotests.log('')
    iotests.log('Pause/resume in RUNNING')
    test_pause_resume(vm)

    iotests.log(vm.qmp('job-complete', id='job0'))
    iotests.log(vm.qmp('job-finalize', id='job0'))
    iotests.log(vm.qmp('job-dismiss', id='job0'))

    iotests.log(vm.qmp('block-job-complete', device='job0'))
    iotests.log(vm.qmp('block-job-finalize', id='job0'))
    iotests.log(vm.qmp('block-job-dismiss', id='job0'))
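
    # Each rejected command above fails with a GenericError; the reply is
    # roughly of the form (description illustrative):
    #   {'error': {'class': 'GenericError', 'desc': "Job 'job0' in state
    #    'running' cannot accept command verb 'complete'"}}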

    # Let the job complete (or transition to READY if it supports that)
    iotests.log(vm.qmp('block-job-set-speed', device='job0', speed=0))
    if has_ready:
        iotests.log('')
        iotests.log('Waiting for READY state...')
        vm.event_wait('BLOCK_JOB_READY')
        iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
        iotests.log(vm.qmp('query-jobs'))

        # READY state:
        # pause/resume/complete should work, finalize/dismiss should error out
        iotests.log('')
        iotests.log('Pause/resume in READY')
        test_pause_resume(vm)

        iotests.log(vm.qmp('job-finalize', id='job0'))
        iotests.log(vm.qmp('job-dismiss', id='job0'))

        iotests.log(vm.qmp('block-job-finalize', id='job0'))
        iotests.log(vm.qmp('block-job-dismiss', id='job0'))

        # Transition to WAITING
        iotests.log(vm.qmp('job-complete', id='job0'))

    # Move to WAITING and PENDING state
    iotests.log('')
    iotests.log('Waiting for PENDING state...')
    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))

    if not job_args.get('auto-finalize', True):
        # PENDING state:
        # finalize should work, pause/complete/dismiss should error out
        iotests.log(vm.qmp('query-jobs'))

        iotests.log(vm.qmp('job-pause', id='job0'))
        iotests.log(vm.qmp('job-complete', id='job0'))
        iotests.log(vm.qmp('job-dismiss', id='job0'))

        iotests.log(vm.qmp('block-job-pause', device='job0'))
        iotests.log(vm.qmp('block-job-complete', device='job0'))
        iotests.log(vm.qmp('block-job-dismiss', id='job0'))

        # Transition to CONCLUDED
        iotests.log(vm.qmp('job-finalize', id='job0'))

    # Move to CONCLUDED state
    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))

    if not job_args.get('auto-dismiss', True):
        # CONCLUDED state:
        # dismiss should work, pause/complete/finalize should error out
        iotests.log(vm.qmp('query-jobs'))

        iotests.log(vm.qmp('job-pause', id='job0'))
        iotests.log(vm.qmp('job-complete', id='job0'))
        iotests.log(vm.qmp('job-finalize', id='job0'))

        iotests.log(vm.qmp('block-job-pause', device='job0'))
        iotests.log(vm.qmp('block-job-complete', device='job0'))
        iotests.log(vm.qmp('block-job-finalize', id='job0'))

        # Transition to NULL
        iotests.log(vm.qmp('job-dismiss', id='job0'))

    # Move to NULL state
    iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
    iotests.log(vm.qmp('query-jobs'))


with iotests.FilePath('disk.img') as disk_path, \
     iotests.FilePath('copy.img') as copy_path, \
     iotests.VM() as vm:

    iotests.qemu_img_create('-f', iotests.imgfmt, disk_path, str(img_size))
    iotests.qemu_io('-c', 'write 0 %i' % (img_size),
                    '-f', iotests.imgfmt, disk_path)

    iotests.log('Launching VM...')
    vm.add_blockdev(vm.qmp_to_opts({
        'driver': iotests.imgfmt,
        'node-name': 'drive0-node',
        'file': {
            'driver': 'file',
            'filename': disk_path,
        },
    }))
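
    # qmp_to_opts() flattens the nested dict into a -blockdev command-line
    # option string, roughly (assuming qcow2 as imgfmt):
    #   driver=qcow2,node-name=drive0-node,file.driver=file,file.filename=...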
    vm.launch()

    # In order to keep things deterministic (especially progress in query-jobs,
    # but related to this also automatic state transitions like job
    # completion), but still get pause points often enough to avoid making this
    # test very slow, it's important to have the right ratio between speed and
    # buf_size.
    #
    # For backup, buf_size is hard-coded to the source image cluster size (64k),
    # so we'll pick the same for mirror. The slice time, i.e. the granularity
    # of the rate limiting, is 100ms. With a speed of 256k per second, we can
    # get four pause points per second. This gives us 250ms per iteration,
    # which should be enough to stay deterministic.
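
    # Worked numbers for the ratio described above (taken from the job
    # parameters passed below):
    #   speed    = 262144 B/s (256 KiB/s)
    #   buf_size = 65536 B    (64 KiB)
    #   time per buffer = 65536 / 262144 s = 250 ms, i.e. roughly four
    #   pause points per second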

    test_job_lifecycle(vm, 'drive-mirror', has_ready=True, job_args={
        'device': 'drive0-node',
        'target': copy_path,
        'sync': 'full',
        'speed': 262144,
        'buf_size': 65536,
    })

    for auto_finalize in [True, False]:
        for auto_dismiss in [True, False]:
            test_job_lifecycle(vm, 'drive-backup', job_args={
                'device': 'drive0-node',
                'target': copy_path,
                'sync': 'full',
                'speed': 262144,
                'auto-finalize': auto_finalize,
                'auto-dismiss': auto_dismiss,
            })

    vm.shutdown()