qemu-e2k/qapi/block-core.json

# -*- Mode: Python -*-
##
# == Block core (VM unrelated)
##
{ 'include': 'common.json' }
{ 'include': 'crypto.json' }
{ 'include': 'sockets.json' }
##
# @SnapshotInfo:
#
# @id: unique snapshot id
#
# @name: user chosen name
#
# @vm-state-size: size of the VM state
#
# @date-sec: UTC date of the snapshot in seconds
#
# @date-nsec: fractional part in nanoseconds to be used with date-sec
#
# @vm-clock-sec: VM clock relative to boot in seconds
#
# @vm-clock-nsec: fractional part in nanoseconds to be used with vm-clock-sec
#
# Since: 1.3
#
##
{ 'struct': 'SnapshotInfo',
'data': { 'id': 'str', 'name': 'str', 'vm-state-size': 'int',
'date-sec': 'int', 'date-nsec': 'int',
'vm-clock-sec': 'int', 'vm-clock-nsec': 'int' } }
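# Example of a SnapshotInfo element as it appears in the query-block
# example further below (values are illustrative):
#
#   { "id": "1", "name": "snapshot1", "vm-state-size": 0,
#     "date-sec": 10000200, "date-nsec": 12,
#     "vm-clock-sec": 206, "vm-clock-nsec": 30 }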
##
# @ImageInfoSpecificQCow2EncryptionBase:
#
# @format: The encryption format
#
# Since: 2.10
##
{ 'struct': 'ImageInfoSpecificQCow2EncryptionBase',
'data': { 'format': 'BlockdevQcow2EncryptionFormat'}}
##
# @ImageInfoSpecificQCow2Encryption:
#
# Since: 2.10
##
{ 'union': 'ImageInfoSpecificQCow2Encryption',
'base': 'ImageInfoSpecificQCow2EncryptionBase',
'discriminator': 'format',
'data': { 'aes': 'QCryptoBlockInfoQCow',
'luks': 'QCryptoBlockInfoLUKS' } }
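# As a flat union, the encryption information is represented on the wire
# as one object containing the @format discriminator plus the members of
# the selected branch.  A minimal sketch for the legacy 'aes' branch,
# which carries no extra members; the 'luks' branch additionally carries
# the QCryptoBlockInfoLUKS members defined in crypto.json (cipher, IV
# generator, key slots, and so on):
#
#   { "format": "aes" }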
##
# @ImageInfoSpecificQCow2:
#
# @compat: compatibility level
#
# @lazy-refcounts: on or off; only valid for compat >= 1.1
#
# @corrupt: true if the image has been marked corrupt; only valid for
# compat >= 1.1 (since 2.2)
#
# @refcount-bits: width of a refcount entry in bits (since 2.3)
#
# @encrypt: details about encryption parameters; only set if image
# is encrypted (since 2.10)
#
# Since: 1.7
##
{ 'struct': 'ImageInfoSpecificQCow2',
'data': {
'compat': 'str',
'*lazy-refcounts': 'bool',
'*corrupt': 'bool',
'refcount-bits': 'int',
'*encrypt': 'ImageInfoSpecificQCow2Encryption'
} }
##
# @ImageInfoSpecificVmdk:
#
# @create-type: The create type of VMDK image
#
# @cid: Content id of image
#
# @parent-cid: Parent VMDK image's cid
#
# @extents: List of extent files
#
# Since: 1.7
##
{ 'struct': 'ImageInfoSpecificVmdk',
'data': {
'create-type': 'str',
'cid': 'int',
'parent-cid': 'int',
'extents': ['ImageInfo']
} }
##
# @ImageInfoSpecific:
#
# A discriminated record of image format specific information structures.
#
# Since: 1.7
##
{ 'union': 'ImageInfoSpecific',
'data': {
'qcow2': 'ImageInfoSpecificQCow2',
'vmdk': 'ImageInfoSpecificVmdk',
# If we need to add block driver specific parameters for
# LUKS in future, then we'll subclass QCryptoBlockInfoLUKS
# to define a ImageInfoSpecificLUKS
'luks': 'QCryptoBlockInfoLUKS'
} }
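# As a simple union, this is represented on the wire with 'type' and
# 'data' keys.  An illustrative qcow2 instance, with member values taken
# from typical 'qemu-img info' output:
#
#   { "type": "qcow2",
#     "data": { "compat": "1.1", "lazy-refcounts": false,
#               "refcount-bits": 16, "corrupt": false } }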
##
# @ImageInfo:
#
# Information about a QEMU image file
#
# @filename: name of the image file
#
# @format: format of the image file
#
# @virtual-size: maximum capacity in bytes of the image
#
# @actual-size: actual size on disk in bytes of the image
#
# @dirty-flag: true if image is not cleanly closed
#
# @cluster-size: size of a cluster in bytes
#
# @encrypted: true if the image is encrypted
#
# @compressed: true if the image is compressed (Since 1.7)
#
# @backing-filename: name of the backing file
#
# @full-backing-filename: full path of the backing file
#
# @backing-filename-format: the format of the backing file
#
# @snapshots: list of VM snapshots
#
# @backing-image: info of the backing image (since 1.6)
#
# @format-specific: structure supplying additional format-specific
# information (since 1.7)
#
# Since: 1.3
#
##
{ 'struct': 'ImageInfo',
'data': {'filename': 'str', 'format': 'str', '*dirty-flag': 'bool',
'*actual-size': 'int', 'virtual-size': 'int',
'*cluster-size': 'int', '*encrypted': 'bool', '*compressed': 'bool',
'*backing-filename': 'str', '*full-backing-filename': 'str',
'*backing-filename-format': 'str', '*snapshots': ['SnapshotInfo'],
'*backing-image': 'ImageInfo',
'*format-specific': 'ImageInfoSpecific' } }
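# A minimal ImageInfo object, as it may appear in the @backing-image
# member of the query-block example further below (values are
# illustrative):
#
#   { "filename": "disks/base.qcow2", "format": "qcow2",
#     "virtual-size": 2048000 }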
##
# @ImageCheck:
#
# Information about a QEMU image file check
#
# @filename: name of the image file checked
#
# @format: format of the image file checked
#
# @check-errors: number of unexpected errors that occurred during the check
#
# @image-end-offset: offset (in bytes) where the image ends, this
# field is present if the driver for the image format
# supports it
#
# @corruptions: number of corruptions found during the check if any
#
# @leaks: number of leaks found during the check if any
#
# @corruptions-fixed: number of corruptions fixed during the check
# if any
#
# @leaks-fixed: number of leaks fixed during the check if any
#
# @total-clusters: total number of clusters, this field is present
# if the driver for the image format supports it
#
# @allocated-clusters: total number of allocated clusters, this
# field is present if the driver for the image format
# supports it
#
# @fragmented-clusters: total number of fragmented clusters, this
# field is present if the driver for the image format
# supports it
#
# @compressed-clusters: total number of compressed clusters, this
# field is present if the driver for the image format
# supports it
#
# Since: 1.4
#
##
{ 'struct': 'ImageCheck',
'data': {'filename': 'str', 'format': 'str', 'check-errors': 'int',
'*image-end-offset': 'int', '*corruptions': 'int', '*leaks': 'int',
'*corruptions-fixed': 'int', '*leaks-fixed': 'int',
'*total-clusters': 'int', '*allocated-clusters': 'int',
'*fragmented-clusters': 'int', '*compressed-clusters': 'int' } }
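# A hypothetical result for a clean qcow2 image (all values are
# illustrative; the optional fields are only present when the image
# format driver supports them):
#
#   { "filename": "test.qcow2", "format": "qcow2", "check-errors": 0,
#     "image-end-offset": 262144, "total-clusters": 32768,
#     "allocated-clusters": 3, "fragmented-clusters": 0,
#     "compressed-clusters": 0 }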
##
# @MapEntry:
#
# Mapping information from a virtual block range to a host file range
#
# @start: the start byte of the mapped virtual range
#
# @length: the number of bytes of the mapped virtual range
#
# @data: whether the mapped range has data
#
# @zero: whether the virtual blocks are zeroed
#
# @depth: the depth of the mapping
#
# @offset: the offset in file that the virtual sectors are mapped to
#
# @filename: filename that is referred to by @offset
#
# Since: 2.6
#
##
{ 'struct': 'MapEntry',
'data': {'start': 'int', 'length': 'int', 'data': 'bool',
'zero': 'bool', 'depth': 'int', '*offset': 'int',
'*filename': 'str' } }
##
# @BlockdevCacheInfo:
#
# Cache mode information for a block device
#
# @writeback: true if writeback mode is enabled
# @direct: true if the host page cache is bypassed (O_DIRECT)
# @no-flush: true if flush requests are ignored for the device
#
# Since: 2.3
##
{ 'struct': 'BlockdevCacheInfo',
'data': { 'writeback': 'bool',
'direct': 'bool',
'no-flush': 'bool' } }
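# For instance, the default 'writeback' cache mode would be reported as
# (illustrative):
#
#   { "writeback": true, "direct": false, "no-flush": false }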
##
# @BlockDeviceInfo:
#
# Information about the backing device for a block device.
#
# @file: the filename of the backing device
#
# @node-name: the name of the block driver node (Since 2.0)
#
# @ro: true if the backing device was open read-only
#
# @drv: the name of the block format used to open the backing device. As of
# 0.14.0 this can be: 'blkdebug', 'bochs', 'cloop', 'cow', 'dmg',
# 'file', 'ftp', 'ftps', 'host_cdrom', 'host_device',
# 'http', 'https', 'luks', 'nbd', 'parallels', 'qcow',
# 'qcow2', 'raw', 'vdi', 'vmdk', 'vpc', 'vvfat'
# 2.2: 'archipelago' added, 'cow' dropped
# 2.3: 'host_floppy' deprecated
# 2.5: 'host_floppy' dropped
# 2.6: 'luks' added
# 2.8: 'replication' added, 'tftp' dropped
# 2.9: 'archipelago' dropped
#
# @backing_file: the name of the backing file (for copy-on-write)
#
# @backing_file_depth: number of files in the backing file chain (since: 1.2)
#
# @encrypted: true if the backing device is encrypted
#
# @encryption_key_missing: Deprecated; always false
#
# @detect_zeroes: detect and optimize zero writes (Since 2.1)
#
# @bps: total throughput limit, in bytes per second
#
# @bps_rd: read throughput limit, in bytes per second
#
# @bps_wr: write throughput limit, in bytes per second
#
# @iops: total limit on I/O operations per second
#
# @iops_rd: limit on read operations per second
#
# @iops_wr: limit on write operations per second
#
# @image: the info of image used (since: 1.6)
#
# @bps_max: total throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_rd_max: read throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_wr_max: write throughput limit during bursts,
# in bytes (Since 1.7)
#
# @iops_max: total I/O operations per second during bursts
# (Since 1.7)
#
# @iops_rd_max: read I/O operations per second during bursts
# (Since 1.7)
#
# @iops_wr_max: write I/O operations per second during bursts
# (Since 1.7)
#
# @bps_max_length: maximum length of the @bps_max burst
# period, in seconds. (Since 2.6)
#
# @bps_rd_max_length: maximum length of the @bps_rd_max
# burst period, in seconds. (Since 2.6)
#
# @bps_wr_max_length: maximum length of the @bps_wr_max
# burst period, in seconds. (Since 2.6)
#
# @iops_max_length: maximum length of the @iops_max burst
# period, in seconds. (Since 2.6)
#
# @iops_rd_max_length: maximum length of the @iops_rd_max
# burst period, in seconds. (Since 2.6)
#
# @iops_wr_max_length: maximum length of the @iops_wr_max
# burst period, in seconds. (Since 2.6)
#
# @iops_size: an I/O size in bytes (Since 1.7)
#
# @group: throttle group name (Since 2.4)
#
# @cache: the cache mode used for the block device (since: 2.3)
#
# @write_threshold: configured write threshold for the device.
# 0 if disabled. (Since 2.3)
#
# Since: 0.14.0
#
##
{ 'struct': 'BlockDeviceInfo',
'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str',
'*backing_file': 'str', 'backing_file_depth': 'int',
'encrypted': 'bool', 'encryption_key_missing': 'bool',
'detect_zeroes': 'BlockdevDetectZeroesOptions',
'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
'image': 'ImageInfo',
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
'*bps_max_length': 'int', '*bps_rd_max_length': 'int',
'*bps_wr_max_length': 'int', '*iops_max_length': 'int',
'*iops_rd_max_length': 'int', '*iops_wr_max_length': 'int',
'*iops_size': 'int', '*group': 'str', 'cache': 'BlockdevCacheInfo',
'write_threshold': 'int' } }
##
# @BlockDeviceIoStatus:
#
# An enumeration of block device I/O status.
#
# @ok: The last I/O operation has succeeded
#
# @failed: The last I/O operation has failed
#
# @nospace: The last I/O operation has failed due to a no-space condition
#
# Since: 1.0
##
{ 'enum': 'BlockDeviceIoStatus', 'data': [ 'ok', 'failed', 'nospace' ] }
##
# @BlockDeviceMapEntry:
#
# Entry in the metadata map of the device (returned by "qemu-img map")
#
# @start: Offset in the image of the first byte described by this entry
# (in bytes)
#
# @length: Length of the range described by this entry (in bytes)
#
# @depth: Number of layers (0 = top image, 1 = top image's backing file, etc.)
# before reaching one for which the range is allocated. The value is
# in the range 0 to the depth of the image chain - 1.
#
# @zero: the sectors in this range read as zeros
#
# @data: reading the image will actually read data from a file (in particular,
# if @offset is present this means that the sectors are not simply
# preallocated, but contain actual data in raw format)
#
# @offset: if present, the image file stores the data for this range in
# raw format at the given offset.
#
# Since: 1.7
##
{ 'struct': 'BlockDeviceMapEntry',
'data': { 'start': 'int', 'length': 'int', 'depth': 'int', 'zero': 'bool',
'data': 'bool', '*offset': 'int' } }
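# A hypothetical entry describing a 64 KiB allocated range stored in raw
# format in the image file (values are illustrative):
#
#   { "start": 0, "length": 65536, "depth": 0, "zero": false,
#     "data": true, "offset": 327680 }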
##
# @DirtyBitmapStatus:
#
# An enumeration of possible states that a dirty bitmap can report to the user.
#
# @frozen: The bitmap is currently in-use by a backup operation or block job,
# and is immutable.
#
# @disabled: The bitmap is currently in-use by an internal operation and is
# read-only. It can still be deleted.
#
# @active: The bitmap is actively monitoring for new writes, and can be cleared,
# deleted, or used for backup operations.
#
# @locked: The bitmap is currently in-use by some operation and can not be
# cleared, deleted, or used for backup operations. (Since 2.12)
#
# Since: 2.4
##
{ 'enum': 'DirtyBitmapStatus',
'data': ['active', 'disabled', 'frozen', 'locked'] }
##
# @BlockDirtyInfo:
#
# Block dirty bitmap information.
#
# @name: the name of the dirty bitmap (Since 2.4)
#
# @count: number of dirty bytes according to the dirty bitmap
#
# @granularity: granularity of the dirty bitmap in bytes (since 1.4)
#
# @status: current status of the dirty bitmap (since 2.4)
#
# Since: 1.3
##
{ 'struct': 'BlockDirtyInfo',
'data': {'*name': 'str', 'count': 'int', 'granularity': 'uint32',
'status': 'DirtyBitmapStatus'} }
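# A hypothetical bitmap entry as it may appear in the @dirty-bitmaps
# list of query-block (values are illustrative):
#
#   { "name": "bitmap0", "count": 0, "granularity": 65536,
#     "status": "active" }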
##
# @BlockLatencyHistogramInfo:
#
# Block latency histogram.
#
# @boundaries: list of interval boundary values in nanoseconds, all greater
# than zero and in ascending order.
# For example, the list [10, 50, 100] produces the following
# histogram intervals: [0, 10), [10, 50), [50, 100), [100, +inf).
#
# @bins: list of io request counts corresponding to histogram intervals.
# len(@bins) = len(@boundaries) + 1
# For the example above, @bins may be something like [3, 1, 5, 2],
# and corresponding histogram looks like:
#
# 5| *
# 4| *
# 3| * *
# 2| * * *
# 1| * * * *
# +------------------
# 10 50 100
#
# Since: 2.12
##
{ 'struct': 'BlockLatencyHistogramInfo',
'data': {'boundaries': ['uint64'], 'bins': ['uint64'] } }
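# The example from the description above would be encoded as:
#
#   { "boundaries": [10, 50, 100], "bins": [3, 1, 5, 2] }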
##
# @x-block-latency-histogram-set:
#
# Manage read, write and flush latency histograms for the device.
#
# If only the @device parameter is specified, remove all existing latency
# histograms for the device. Otherwise, add/reset some (or all) of the
# latency histograms.
#
# @device: device name to set latency histogram for.
#
# @boundaries: list of interval boundary values (see description in
# BlockLatencyHistogramInfo definition). If specified, all
# latency histograms are removed, and empty ones created for all
# io types with intervals corresponding to @boundaries (except for
# io types, for which specific boundaries are set through the
# following parameters).
#
# @boundaries-read: list of interval boundary values for read latency
# histogram. If specified, old read latency histogram is
# removed, and empty one created with intervals
# corresponding to @boundaries-read. The parameter has higher
# priority than @boundaries.
#
# @boundaries-write: list of interval boundary values for write latency
# histogram.
#
# @boundaries-flush: list of interval boundary values for flush latency
# histogram.
#
# Returns: error if device is not found or any boundary arrays are invalid.
#
# Since: 2.12
#
# Example: set new histograms for all io types with intervals
# [0, 10), [10, 50), [50, 100), [100, +inf):
#
# -> { "execute": "block-latency-histogram-set",
# "arguments": { "device": "drive0",
# "boundaries": [10, 50, 100] } }
# <- { "return": {} }
#
# Example: set a new histogram only for writes; other histograms remain
# unchanged (or are not created):
#
# -> { "execute": "block-latency-histogram-set",
# "arguments": { "device": "drive0",
# "boundaries-write": [10, 50, 100] } }
# <- { "return": {} }
#
# Example: set new histograms with the following intervals:
# read, flush: [0, 10), [10, 50), [50, 100), [100, +inf)
# write: [0, 1000), [1000, 5000), [5000, +inf)
#
# -> { "execute": "block-latency-histogram-set",
# "arguments": { "device": "drive0",
# "boundaries": [10, 50, 100],
# "boundaries-write": [1000, 5000] } }
# <- { "return": {} }
#
# Example: remove all latency histograms:
#
# -> { "execute": "block-latency-histogram-set",
# "arguments": { "device": "drive0" } }
# <- { "return": {} }
##
{ 'command': 'x-block-latency-histogram-set',
'data': {'device': 'str',
'*boundaries': ['uint64'],
'*boundaries-read': ['uint64'],
'*boundaries-write': ['uint64'],
'*boundaries-flush': ['uint64'] } }
##
# @BlockInfo:
#
# Block device information. This structure describes a virtual device and
# the backing device associated with it.
#
# @device: The device name associated with the virtual device.
#
# @qdev: The qdev ID, or if no ID is assigned, the QOM path of the block
# device. (since 2.10)
#
# @type: This field is returned only for compatibility reasons; it should
# not be used (it always returns 'unknown')
#
# @removable: True if the device supports removable media.
#
# @locked: True if the guest has locked this device from having its media
# removed
#
# @tray_open: True if the device's tray is open
# (only present if it has a tray)
#
# @dirty-bitmaps: dirty bitmaps information (only present if the
# driver has one or more dirty bitmaps) (Since 2.0)
#
# @io-status: @BlockDeviceIoStatus. Only present if the device
# supports it and the VM is configured to stop on errors
# (supported device models: virtio-blk, IDE, SCSI except
# scsi-generic)
#
# @inserted: @BlockDeviceInfo describing the device if media is
# present
#
# Since: 0.14.0
##
{ 'struct': 'BlockInfo',
'data': {'device': 'str', '*qdev': 'str', 'type': 'str', 'removable': 'bool',
'locked': 'bool', '*inserted': 'BlockDeviceInfo',
'*tray_open': 'bool', '*io-status': 'BlockDeviceIoStatus',
'*dirty-bitmaps': ['BlockDirtyInfo'] } }
##
# @BlockMeasureInfo:
#
# Image file size calculation information. This structure describes the size
# requirements for creating a new image file.
#
# The size requirements depend on the new image file format. File size always
# equals virtual disk size for the 'raw' format, even for sparse POSIX files.
# Compact formats such as 'qcow2' represent unallocated and zero regions
# efficiently so file size may be smaller than virtual disk size.
#
# The values are upper bounds that are guaranteed to fit the new image file.
# Subsequent modification, such as internal snapshot or bitmap creation, may
# require additional space and is not covered here.
#
# @required: Size required for a new image file, in bytes.
#
# @fully-allocated: Image file size, in bytes, once data has been written
# to all sectors.
#
# Since: 2.10
##
{ 'struct': 'BlockMeasureInfo',
'data': {'required': 'int', 'fully-allocated': 'int'} }
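# A hypothetical result for measuring a 1 GiB qcow2 image (values are
# illustrative only):
#
#   { "required": 393216, "fully-allocated": 1074135040 }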
##
# @query-block:
#
# Get a list of BlockInfo for all virtual block devices.
#
# Returns: a list of @BlockInfo describing each virtual block device. Filter
# nodes that were created implicitly are skipped over.
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "query-block" }
# <- {
# "return":[
# {
# "io-status": "ok",
# "device":"ide0-hd0",
# "locked":false,
# "removable":false,
# "inserted":{
# "ro":false,
# "drv":"qcow2",
# "encrypted":false,
# "file":"disks/test.qcow2",
# "backing_file_depth":1,
# "bps":1000000,
# "bps_rd":0,
# "bps_wr":0,
# "iops":1000000,
# "iops_rd":0,
# "iops_wr":0,
# "bps_max": 8000000,
# "bps_rd_max": 0,
# "bps_wr_max": 0,
# "iops_max": 0,
# "iops_rd_max": 0,
# "iops_wr_max": 0,
# "iops_size": 0,
# "detect_zeroes": "on",
# "write_threshold": 0,
# "image":{
# "filename":"disks/test.qcow2",
# "format":"qcow2",
# "virtual-size":2048000,
# "backing_file":"base.qcow2",
# "full-backing-filename":"disks/base.qcow2",
# "backing-filename-format":"qcow2",
# "snapshots":[
# {
# "id": "1",
# "name": "snapshot1",
# "vm-state-size": 0,
# "date-sec": 10000200,
# "date-nsec": 12,
# "vm-clock-sec": 206,
# "vm-clock-nsec": 30
# }
# ],
# "backing-image":{
# "filename":"disks/base.qcow2",
# "format":"qcow2",
# "virtual-size":2048000
# }
# }
# },
# "qdev": "ide_disk",
# "type":"unknown"
# },
# {
# "io-status": "ok",
# "device":"ide1-cd0",
# "locked":false,
# "removable":true,
# "qdev": "/machine/unattached/device[23]",
# "tray_open": false,
# "type":"unknown"
# },
# {
# "device":"floppy0",
# "locked":false,
# "removable":true,
# "qdev": "/machine/unattached/device[20]",
# "type":"unknown"
# },
# {
# "device":"sd0",
# "locked":false,
# "removable":true,
# "type":"unknown"
# }
# ]
# }
#
##
{ 'command': 'query-block', 'returns': ['BlockInfo'] }
##
# @BlockDeviceTimedStats:
#
# Statistics of a block device during a given interval of time.
#
# @interval_length: Interval used for calculating the statistics,
# in seconds.
#
# @min_rd_latency_ns: Minimum latency of read operations in the
# defined interval, in nanoseconds.
#
# @min_wr_latency_ns: Minimum latency of write operations in the
# defined interval, in nanoseconds.
#
# @min_flush_latency_ns: Minimum latency of flush operations in the
# defined interval, in nanoseconds.
#
# @max_rd_latency_ns: Maximum latency of read operations in the
# defined interval, in nanoseconds.
#
# @max_wr_latency_ns: Maximum latency of write operations in the
# defined interval, in nanoseconds.
#
# @max_flush_latency_ns: Maximum latency of flush operations in the
# defined interval, in nanoseconds.
#
# @avg_rd_latency_ns: Average latency of read operations in the
# defined interval, in nanoseconds.
#
# @avg_wr_latency_ns: Average latency of write operations in the
# defined interval, in nanoseconds.
#
# @avg_flush_latency_ns: Average latency of flush operations in the
# defined interval, in nanoseconds.
#
# @avg_rd_queue_depth: Average number of pending read operations
# in the defined interval.
#
# @avg_wr_queue_depth: Average number of pending write operations
# in the defined interval.
#
# Since: 2.5
##
{ 'struct': 'BlockDeviceTimedStats',
'data': { 'interval_length': 'int', 'min_rd_latency_ns': 'int',
'max_rd_latency_ns': 'int', 'avg_rd_latency_ns': 'int',
'min_wr_latency_ns': 'int', 'max_wr_latency_ns': 'int',
'avg_wr_latency_ns': 'int', 'min_flush_latency_ns': 'int',
'max_flush_latency_ns': 'int', 'avg_flush_latency_ns': 'int',
'avg_rd_queue_depth': 'number', 'avg_wr_queue_depth': 'number' } }
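# A hypothetical entry for a 60-second collection interval (all values
# are illustrative):
#
#   { "interval_length": 60,
#     "min_rd_latency_ns": 10000, "max_rd_latency_ns": 1520000,
#     "avg_rd_latency_ns": 87000,
#     "min_wr_latency_ns": 12000, "max_wr_latency_ns": 2450000,
#     "avg_wr_latency_ns": 134000,
#     "min_flush_latency_ns": 0, "max_flush_latency_ns": 0,
#     "avg_flush_latency_ns": 0,
#     "avg_rd_queue_depth": 1.2, "avg_wr_queue_depth": 0.8 }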
##
# @BlockDeviceStats:
#
# Statistics of a virtual block device or a block backing device.
#
# @rd_bytes: The number of bytes read by the device.
#
# @wr_bytes: The number of bytes written by the device.
#
# @rd_operations: The number of read operations performed by the device.
#
# @wr_operations: The number of write operations performed by the device.
#
# @flush_operations: The number of cache flush operations performed by the
# device (since 0.15.0)
#
# @flush_total_time_ns: Total time spent on cache flushes in nanoseconds
# (since 0.15.0).
#
# @wr_total_time_ns: Total time spent on writes in nanoseconds (since 0.15.0).
#
# @rd_total_time_ns: Total time spent on reads in nanoseconds (since 0.15.0).
#
# @wr_highest_offset: The offset after the greatest byte written to the
# device. The intended use of this information is for
# growable sparse files (like qcow2) that are used on top
# of a physical device.
#
# @rd_merged: Number of read requests that have been merged into another
# request (Since 2.3).
#
# @wr_merged: Number of write requests that have been merged into another
# request (Since 2.3).
#
# @idle_time_ns: Time since the last I/O operation, in
# nanoseconds. If the field is absent it means that
# there haven't been any operations yet (Since 2.5).
#
# @failed_rd_operations: The number of failed read operations
# performed by the device (Since 2.5)
#
# @failed_wr_operations: The number of failed write operations
# performed by the device (Since 2.5)
#
# @failed_flush_operations: The number of failed flush operations
# performed by the device (Since 2.5)
#
# @invalid_rd_operations: The number of invalid read operations
# performed by the device (Since 2.5)
#
# @invalid_wr_operations: The number of invalid write operations
# performed by the device (Since 2.5)
#
# @invalid_flush_operations: The number of invalid flush operations
# performed by the device (Since 2.5)
#
# @account_invalid: Whether invalid operations are included in the
# last access statistics (Since 2.5)
#
# @account_failed: Whether failed operations are included in the
# latency and last access statistics (Since 2.5)
#
# @timed_stats: Statistics specific to the set of previously defined
# intervals of time (Since 2.5)
#
# @x_rd_latency_histogram: @BlockLatencyHistogramInfo. (Since 2.12)
#
# @x_wr_latency_histogram: @BlockLatencyHistogramInfo. (Since 2.12)
#
# @x_flush_latency_histogram: @BlockLatencyHistogramInfo. (Since 2.12)
#
# Since: 0.14.0
##
{ 'struct': 'BlockDeviceStats',
'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'rd_operations': 'int',
'wr_operations': 'int', 'flush_operations': 'int',
'flush_total_time_ns': 'int', 'wr_total_time_ns': 'int',
'rd_total_time_ns': 'int', 'wr_highest_offset': 'int',
'rd_merged': 'int', 'wr_merged': 'int', '*idle_time_ns': 'int',
'failed_rd_operations': 'int', 'failed_wr_operations': 'int',
'failed_flush_operations': 'int', 'invalid_rd_operations': 'int',
'invalid_wr_operations': 'int', 'invalid_flush_operations': 'int',
'account_invalid': 'bool', 'account_failed': 'bool',
'timed_stats': ['BlockDeviceTimedStats'],
'*x_rd_latency_histogram': 'BlockLatencyHistogramInfo',
'*x_wr_latency_histogram': 'BlockLatencyHistogramInfo',
'*x_flush_latency_histogram': 'BlockLatencyHistogramInfo' } }
##
# @BlockStats:
#
# Statistics of a virtual block device or a block backing device.
#
# @device: If the stats are for a virtual block device, the name
# corresponding to the virtual block device.
#
# @node-name: The node name of the device. (Since 2.3)
#
# @stats: A @BlockDeviceStats for the device.
#
# @parent: This describes the file block device if it has one.
# Contains recursively the statistics of the underlying
# protocol (e.g. the host file for a qcow2 image). If there is
# no underlying protocol, this field is omitted
#
# @backing: This describes the backing block device if it has one.
# (Since 2.0)
#
# Since: 0.14.0
##
{ 'struct': 'BlockStats',
'data': {'*device': 'str', '*node-name': 'str',
'stats': 'BlockDeviceStats',
'*parent': 'BlockStats',
'*backing': 'BlockStats'} }
##
# @query-blockstats:
#
# Query the @BlockStats for all virtual block devices.
#
# @query-nodes: If true, the command will query all the block nodes
# that have a node name, in a list which will include "parent"
# information, but not "backing".
# If false or omitted, the behavior is as before - query all the
# device backends, recursively including their "parent" and
# "backing". Filter nodes that were created implicitly are
# skipped over in this mode. (Since 2.3)
#
# Returns: A list of @BlockStats for each virtual block device.
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "query-blockstats" }
# <- {
# "return":[
# {
# "device":"ide0-hd0",
# "parent":{
# "stats":{
# "wr_highest_offset":3686448128,
# "wr_bytes":9786368,
# "wr_operations":751,
# "rd_bytes":122567168,
# "rd_operations":36772
# "wr_total_times_ns":313253456
# "rd_total_times_ns":3465673657
# "flush_total_times_ns":49653
# "flush_operations":61,
# "rd_merged":0,
# "wr_merged":0,
# "idle_time_ns":2953431879,
# "account_invalid":true,
# "account_failed":false
# }
# },
# "stats":{
# "wr_highest_offset":2821110784,
# "wr_bytes":9786368,
# "wr_operations":692,
# "rd_bytes":122739200,
# "rd_operations":36604
# "flush_operations":51,
# "wr_total_times_ns":313253456
# "rd_total_times_ns":3465673657
# "flush_total_times_ns":49653,
# "rd_merged":0,
# "wr_merged":0,
# "idle_time_ns":2953431879,
# "account_invalid":true,
# "account_failed":false
# }
# },
# {
# "device":"ide1-cd0",
# "stats":{
# "wr_highest_offset":0,
# "wr_bytes":0,
# "wr_operations":0,
# "rd_bytes":0,
# "rd_operations":0
# "flush_operations":0,
# "wr_total_times_ns":0
# "rd_total_times_ns":0
# "flush_total_times_ns":0,
# "rd_merged":0,
# "wr_merged":0,
# "account_invalid":false,
# "account_failed":false
# }
# },
# {
# "device":"floppy0",
# "stats":{
# "wr_highest_offset":0,
# "wr_bytes":0,
# "wr_operations":0,
# "rd_bytes":0,
# "rd_operations":0
# "flush_operations":0,
# "wr_total_times_ns":0
# "rd_total_times_ns":0
# "flush_total_times_ns":0,
# "rd_merged":0,
# "wr_merged":0,
# "account_invalid":false,
# "account_failed":false
# }
# },
# {
# "device":"sd0",
# "stats":{
# "wr_highest_offset":0,
# "wr_bytes":0,
# "wr_operations":0,
# "rd_bytes":0,
# "rd_operations":0
# "flush_operations":0,
# "wr_total_times_ns":0
# "rd_total_times_ns":0
# "flush_total_times_ns":0,
# "rd_merged":0,
# "wr_merged":0,
# "account_invalid":false,
# "account_failed":false
# }
# }
# ]
# }
#
##
{ 'command': 'query-blockstats',
'data': { '*query-nodes': 'bool' },
'returns': ['BlockStats'] }
##
# @BlockdevOnError:
#
# An enumeration of possible behaviors for errors on I/O operations.
# The exact meaning depends on whether the I/O was initiated by a guest
# or by a block job
#
# @report: for guest operations, report the error to the guest;
# for jobs, cancel the job
#
# @ignore: ignore the error, only report a QMP event (BLOCK_IO_ERROR
# or BLOCK_JOB_ERROR)
#
# @enospc: same as @stop on ENOSPC, same as @report otherwise.
#
# @stop: for guest operations, stop the virtual machine;
# for jobs, pause the job
#
# @auto: inherit the error handling policy of the backend (since: 2.7)
#
# Since: 1.3
##
{ 'enum': 'BlockdevOnError',
'data': ['report', 'ignore', 'enospc', 'stop', 'auto'] }
##
# @MirrorSyncMode:
#
# An enumeration of possible behaviors for the initial synchronization
# phase of storage mirroring.
#
# @top: copies data in the topmost image to the destination
#
# @full: copies data from all images to the destination
#
# @none: only copy data written from now on
#
# @incremental: only copy data described by the dirty bitmap. Since: 2.4
#
# Since: 1.3
##
{ 'enum': 'MirrorSyncMode',
'data': ['top', 'full', 'none', 'incremental'] }
##
# @BlockJobType:
#
# Type of a block job.
#
# @commit: block commit job type, see "block-commit"
#
# @stream: block stream job type, see "block-stream"
#
# @mirror: drive mirror job type, see "drive-mirror"
#
# @backup: drive backup job type, see "drive-backup"
#
# Since: 1.7
##
{ 'enum': 'BlockJobType',
'data': ['commit', 'stream', 'mirror', 'backup'] }
##
# @BlockJobVerb:
#
# Represents command verbs that can be applied to a blockjob.
#
# @cancel: see @block-job-cancel
#
# @pause: see @block-job-pause
#
# @resume: see @block-job-resume
#
# @set-speed: see @block-job-set-speed
#
# @complete: see @block-job-complete
#
# @dismiss: see @block-job-dismiss
#
# @finalize: see @block-job-finalize
#
# Since: 2.12
##
{ 'enum': 'BlockJobVerb',
'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss',
'finalize' ] }
##
# @BlockJobStatus:
#
# Indicates the present state of a given blockjob in its lifetime.
#
# @undefined: Erroneous, default state. Should not ever be visible.
#
# @created: The job has been created, but not yet started.
#
# @running: The job is currently running.
#
# @paused: The job is running, but paused. The pause may be requested by
# either the QMP user or by internal processes.
#
# @ready: The job is running, but is ready for the user to signal completion.
# This is used for long-running jobs like mirror that are designed to
# run indefinitely.
#
# @standby: The job is ready, but paused. This is nearly identical to @paused.
# The job may return to @ready or otherwise be canceled.
#
# @waiting: The job is waiting for other jobs in the transaction to converge
# to the waiting state. This status will likely not be visible for
# the last job in a transaction.
#
# @pending: The job has finished its work, but has finalization steps that it
# needs to make prior to completing. These changes may require
# manual intervention by the management process if @auto-finalize was
# set to false. These changes may still fail.
#
# @aborting: The job is in the process of being aborted, and will finish with
# an error. The job will afterwards report that it is @concluded.
# This status may not be visible to the management process.
#
# @concluded: The job has finished all work. If @auto-dismiss was set to false,
# the job will remain in the query list until it is dismissed.
#
# @null: The job is in the process of being dismantled. This state should not
# ever be visible externally.
#
# Since: 2.12
##
{ 'enum': 'BlockJobStatus',
'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
'waiting', 'pending', 'aborting', 'concluded', 'null' ] }
##
# @BlockJobInfo:
#
# Information about a long-running block device operation.
#
# @type: the job type ('stream' for image streaming)
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: the maximum progress value
#
# @busy: false if the job is known to be in a quiescent state, with
# no pending I/O. Since 1.3.
#
# @paused: whether the job is paused or, if @busy is true, will
# pause itself as soon as possible. Since 1.3.
#
# @offset: the current progress value
#
# @speed: the rate limit, bytes per second
#
# @io-status: the status of the job (since 1.3)
#
# @ready: true if the job may be completed (since 2.2)
#
# @status: Current job state/status (since 2.12)
#
# @auto-finalize: Job will finalize itself when PENDING, moving to
# the CONCLUDED state. (since 2.12)
#
# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the NULL
# state and disappearing from the query list. (since 2.12)
#
# @error: Error information if the job did not complete successfully.
# Not set if the job completed successfully. (since 2.12.1)
#
# Since: 1.1
##
{ 'struct': 'BlockJobInfo',
'data': {'type': 'str', 'device': 'str', 'len': 'int',
'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int',
'io-status': 'BlockDeviceIoStatus', 'ready': 'bool',
'status': 'BlockJobStatus',
'auto-finalize': 'bool', 'auto-dismiss': 'bool',
'*error': 'str' } }
##
# @query-block-jobs:
#
# Return information about long-running block device operations.
#
# Returns: a list of @BlockJobInfo for each active block job
#
# Since: 1.1
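#
# Example (an illustrative sketch; the single streaming job shown and all
# of its field values are hypothetical):
#
# -> { "execute": "query-block-jobs" }
# <- { "return": [ { "type": "stream", "device": "job0",
# "len": 10737418240, "offset": 134217728,
# "busy": false, "paused": false, "speed": 0,
# "io-status": "ok", "ready": false, "status": "running",
# "auto-finalize": true, "auto-dismiss": true } ] }
#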
##
{ 'command': 'query-block-jobs', 'returns': ['BlockJobInfo'] }
##
# @block_passwd:
#
# This command sets the password of a block device that has not been opened
# with a password and requires one.
#
# This command is now obsolete and will always return an error since 2.10
#
##
{ 'command': 'block_passwd', 'data': {'*device': 'str',
'*node-name': 'str', 'password': 'str'} }
##
# @block_resize:
#
# Resize a block image while a guest is running.
#
# Either @device or @node-name must be set but not both.
#
# @device: the name of the device to get the image resized
#
# @node-name: graph node name to get the image resized (Since 2.0)
#
# @size: new image size in bytes
#
# Returns: nothing on success
# If @device is not a valid block device, DeviceNotFound
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "block_resize",
# "arguments": { "device": "scratch", "size": 1073741824 } }
# <- { "return": {} }
#
##
{ 'command': 'block_resize', 'data': { '*device': 'str',
'*node-name': 'str',
'size': 'int' }}
##
# @NewImageMode:
#
# An enumeration that tells QEMU how to set the backing file path in
# a new image file.
#
# @existing: QEMU should look for an existing image file.
#
# @absolute-paths: QEMU should create a new image with absolute paths
# for the backing file. If there is no backing file available, the new
# image will not be backed either.
#
# Since: 1.1
##
{ 'enum': 'NewImageMode',
'data': [ 'existing', 'absolute-paths' ] }
##
# @BlockdevSnapshotSync:
#
# Either @device or @node-name must be set but not both.
#
# @device: the name of the device to generate the snapshot from.
#
# @node-name: graph node name to generate the snapshot from (Since 2.0)
#
# @snapshot-file: the target of the new image. If the file exists, or
# if it is a device, the snapshot will be created in the existing
# file/device. Otherwise, a new file will be created.
#
# @snapshot-node-name: the graph node name of the new image (Since 2.0)
#
# @format: the format of the snapshot image, default is 'qcow2'.
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
##
{ 'struct': 'BlockdevSnapshotSync',
'data': { '*device': 'str', '*node-name': 'str',
'snapshot-file': 'str', '*snapshot-node-name': 'str',
'*format': 'str', '*mode': 'NewImageMode' } }
##
# @BlockdevSnapshot:
#
# @node: device or node name that will have a snapshot created.
#
# @overlay: reference to the existing block device that will become
# the overlay of @node, as part of creating the snapshot.
# It must not have a current backing file (this can be
# achieved by passing "backing": null to blockdev-add).
#
# Since: 2.5
##
{ 'struct': 'BlockdevSnapshot',
'data': { 'node': 'str', 'overlay': 'str' } }
##
# @DriveBackup:
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node which should be copied.
#
# @target: the target of the new image. If the file exists, or if it
# is a device, the existing file/device will be used as the new
# destination. If it does not exist, a new file will be created.
#
# @format: the format of the new destination, default is to
# probe if @mode is 'existing', else the format of the source
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, from a
# dirty bitmap, or only new I/O).
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
#
# @speed: the maximum speed, in bytes per second
#
# @bitmap: the name of dirty bitmap if sync is "incremental".
# Must be present if sync is "incremental", must NOT be present
# otherwise. (Since 2.4)
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize.
# When true, this job will automatically perform its abort or
# commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and wait for @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
#
# Since: 1.6
##
{ 'struct': 'DriveBackup',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'*format': 'str', 'sync': 'MirrorSyncMode',
'*mode': 'NewImageMode', '*speed': 'int',
'*bitmap': 'str', '*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @BlockdevBackup:
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node which should be copied.
#
# @target: the device name or node-name of the backup target node.
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, or
# only new I/O).
#
# @speed: the maximum speed, in bytes per second. The default is 0,
# for unlimited.
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize.
# When true, this job will automatically perform its abort or
# commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and wait for @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
#
# Since: 2.3
##
{ 'struct': 'BlockdevBackup',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'sync': 'MirrorSyncMode', '*speed': 'int', '*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @blockdev-snapshot-sync:
#
# Generates a synchronous snapshot of a block device.
#
# For the arguments, see the documentation of BlockdevSnapshotSync.
#
# Returns: nothing on success
# If @device is not a valid block device, DeviceNotFound
#
# Since: 0.14.0
#
# Example:
#
# -> { "execute": "blockdev-snapshot-sync",
# "arguments": { "device": "ide-hd0",
# "snapshot-file":
# "/some/place/my-image",
# "format": "qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-snapshot-sync',
'data': 'BlockdevSnapshotSync' }
##
# @blockdev-snapshot:
#
# Generates a snapshot of a block device.
#
# Create a snapshot, by installing 'node' as the backing image of
# 'overlay'. Additionally, if 'node' is associated with a block
# device, the block device changes to using 'overlay' as its new active
# image.
#
# For the arguments, see the documentation of BlockdevSnapshot.
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "blockdev-add",
# "arguments": { "driver": "qcow2",
# "node-name": "node1534",
# "file": { "driver": "file",
# "filename": "hd1.qcow2" },
# "backing": null } }
#
# <- { "return": {} }
#
# -> { "execute": "blockdev-snapshot",
# "arguments": { "node": "ide-hd0",
# "overlay": "node1534" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-snapshot',
'data': 'BlockdevSnapshot' }
##
# @change-backing-file:
#
# Change the backing file in the image file metadata. This does not
# cause QEMU to reopen the image file to reparse the backing filename
# (it may, however, perform a reopen to change permissions from
# r/o -> r/w -> r/o, if needed). The new backing file string is written
# into the image file metadata, and the QEMU internal strings are
# updated.
#
# @image-node-name: The name of the block driver state node of the
# image to modify. The "device" argument is used
# to verify "image-node-name" is in the chain
# described by "device".
#
# @device: The device name or node-name of the root node that owns
# image-node-name.
#
# @backing-file: The string to write as the backing file. This
# string is not validated, so care should be taken
# when specifying the string or the image chain may
# not be able to be reopened again.
#
# Returns: Nothing on success
#
# If "device" does not exist or cannot be determined, DeviceNotFound
#
# Since: 2.1
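#
# Example (a minimal sketch; the device, node name and backing file string
# shown are hypothetical):
#
# -> { "execute": "change-backing-file",
# "arguments": { "device": "virtio0",
# "image-node-name": "snap1-node",
# "backing-file": "/tmp/new-base.qcow2" } }
# <- { "return": {} }
#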
##
{ 'command': 'change-backing-file',
'data': { 'device': 'str', 'image-node-name': 'str',
'backing-file': 'str' } }
##
# @block-commit:
#
# Live commit of data from overlay image nodes into backing nodes - i.e.,
# writes data between 'top' and 'base' into 'base'.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node
#
# @base: The file name of the backing image to write data into.
# If not specified, this is the deepest backing image.
#
# @top: The file name of the backing image within the image chain,
# which contains the topmost data to be committed down. If
# not specified, this is the active layer.
#
# @backing-file: The backing file string to write into the overlay
# image of 'top'. If 'top' is the active layer,
# specifying a backing file string is an error. This
# filename is not validated.
#
# If a pathname string is such that it cannot be
# resolved by QEMU, that means that subsequent QMP or
# HMP commands must use node-names for the image in
# question, as filename lookup methods will fail.
#
# If not specified, QEMU will automatically determine
# the backing file string to use, or error out if
# there is no obvious choice. Care should be taken
# when specifying the string, to specify a valid
# filename or protocol.
# (Since 2.1)
#
# If top == base, that is an error.
# If top == active, the job will not be completed by itself,
# user needs to complete the job with the block-job-complete
# command after getting the ready event. (Since 2.0)
#
# If the base image is smaller than top, then the base image
# will be resized to be the same size as top. If top is
# smaller than the base image, the base will not be
# truncated. If you want the base image size to match the
# size of the smaller top, you can safely truncate it
# yourself once the commit operation successfully completes.
#
# @speed: the maximum speed, in bytes per second
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the commit job inserts into the graph
# above @top. If this option is not given, a node name is
# autogenerated. (Since: 2.9)
#
# Returns: Nothing on success
# If commit or stream is already active on this device, DeviceInUse
# If @device does not exist, DeviceNotFound
# If image commit is not supported by this device, NotSupported
# If @base or @top is invalid, a generic error is returned
# If @speed is invalid, InvalidParameter
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "block-commit",
# "arguments": { "device": "virtio0",
# "top": "/tmp/snap1.qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'block-commit',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', '*top': 'str',
'*backing-file': 'str', '*speed': 'int',
'*filter-node-name': 'str' } }
##
# @drive-backup:
#
# Start a point-in-time copy of a block device to a new destination. The
# status of ongoing drive-backup operations can be checked with
# query-block-jobs where the BlockJobInfo.type field has the value 'backup'.
# The operation can be stopped before it has completed using the
# block-job-cancel command.
#
# Returns: nothing on success
# If @device is not a valid block device, GenericError
#
# Since: 1.6
#
# Example:
#
# -> { "execute": "drive-backup",
# "arguments": { "device": "drive0",
# "sync": "full",
# "target": "backup.img" } }
# <- { "return": {} }
#
##
{ 'command': 'drive-backup', 'boxed': true,
'data': 'DriveBackup' }
##
# @blockdev-backup:
#
# Start a point-in-time copy of a block device to a new destination. The
# status of ongoing blockdev-backup operations can be checked with
# query-block-jobs where the BlockJobInfo.type field has the value 'backup'.
# The operation can be stopped before it has completed using the
# block-job-cancel command.
#
# Returns: nothing on success
# If @device is not a valid block device, DeviceNotFound
#
# Since: 2.3
#
# Example:
#
# -> { "execute": "blockdev-backup",
# "arguments": { "device": "src-id",
# "sync": "full",
# "target": "tgt-id" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-backup', 'boxed': true,
'data': 'BlockdevBackup' }
##
# @query-named-block-nodes:
#
# Get the named block driver list
#
# Returns: the list of BlockDeviceInfo
#
# Since: 2.0
#
# Example:
#
# -> { "execute": "query-named-block-nodes" }
# <- { "return": [ { "ro":false,
# "drv":"qcow2",
# "encrypted":false,
# "file":"disks/test.qcow2",
# "node-name": "my-node",
# "backing_file_depth":1,
# "bps":1000000,
# "bps_rd":0,
# "bps_wr":0,
# "iops":1000000,
# "iops_rd":0,
# "iops_wr":0,
# "bps_max": 8000000,
# "bps_rd_max": 0,
# "bps_wr_max": 0,
# "iops_max": 0,
# "iops_rd_max": 0,
# "iops_wr_max": 0,
# "iops_size": 0,
# "write_threshold": 0,
# "image":{
# "filename":"disks/test.qcow2",
# "format":"qcow2",
# "virtual-size":2048000,
# "backing_file":"base.qcow2",
# "full-backing-filename":"disks/base.qcow2",
# "backing-filename-format":"qcow2",
# "snapshots":[
# {
# "id": "1",
# "name": "snapshot1",
# "vm-state-size": 0,
# "date-sec": 10000200,
# "date-nsec": 12,
# "vm-clock-sec": 206,
# "vm-clock-nsec": 30
# }
# ],
# "backing-image":{
# "filename":"disks/base.qcow2",
# "format":"qcow2",
# "virtual-size":2048000
# }
# } } ] }
#
##
{ 'command': 'query-named-block-nodes', 'returns': [ 'BlockDeviceInfo' ] }
##
# @drive-mirror:
#
# Start mirroring a block device's writes to a new destination. target
# specifies the target of the new image. If the file exists, or if it
# is a device, it will be used as the new destination for writes. If
# it does not exist, a new file will be created. format specifies the
# format of the mirror image, default is to probe if mode='existing',
# else the format of the source.
#
# Returns: nothing on success
# If @device is not a valid block device, GenericError
#
# Since: 1.3
#
# Example:
#
# -> { "execute": "drive-mirror",
# "arguments": { "device": "ide-hd0",
# "target": "/some/place/my-image",
# "sync": "full",
# "format": "qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'drive-mirror', 'boxed': true,
'data': 'DriveMirror' }
##
# @DriveMirror:
#
# A set of parameters describing drive mirror setup.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node whose writes should be
# mirrored.
#
# @target: the target of the new image. If the file exists, or if it
# is a device, the existing file/device will be used as the new
# destination. If it does not exist, a new file will be created.
#
# @format: the format of the new destination, default is to
# probe if @mode is 'existing', else the format of the source
#
# @node-name: the new block driver state node name in the graph
# (Since 2.1)
#
# @replaces: with sync=full graph node name to be replaced by the new
# image when a whole image copy is done. This can be used to repair
# broken Quorum files. (Since 2.1)
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
#
# @speed: the maximum speed, in bytes per second
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, or
# only new I/O).
#
# @granularity: granularity of the dirty bitmap, default is 64K
# if the image format doesn't have clusters, 4K if the clusters
# are smaller than that, else the cluster size. Must be a
# power of 2 between 512 and 64M (since 1.4).
#
# @buf-size: maximum amount of data in flight from source to
# target (since 1.4).
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
# @unmap: Whether to try to unmap target sectors where source has
# only zero. If true, and target unallocated sectors will read as zero,
# target image sectors will be unmapped; otherwise, zeroes will be
# written. Both will result in identical contents.
# Default is true. (Since 2.4)
#
# Since: 1.3
##
{ 'struct': 'DriveMirror',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'*format': 'str', '*node-name': 'str', '*replaces': 'str',
'sync': 'MirrorSyncMode', '*mode': 'NewImageMode',
'*speed': 'int', '*granularity': 'uint32',
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*unmap': 'bool' } }
##
# @BlockDirtyBitmap:
#
# @node: name of device/node which the bitmap is tracking
#
# @name: name of the dirty bitmap
#
# Since: 2.4
##
{ 'struct': 'BlockDirtyBitmap',
'data': { 'node': 'str', 'name': 'str' } }
##
# @BlockDirtyBitmapAdd:
#
# @node: name of device/node which the bitmap is tracking
#
# @name: name of the dirty bitmap
#
# @granularity: the bitmap granularity, default is 64k for
# block-dirty-bitmap-add
#
# @persistent: the bitmap is persistent, i.e. it will be saved to the
# corresponding block device image file on its close. For now only
# Qcow2 disks support persistent bitmaps. Default is false for
# block-dirty-bitmap-add. (Since: 2.10)
#
# @autoload: ignored and deprecated since 2.12.
# Currently, all dirty tracking bitmaps are loaded from Qcow2 on
# open.
#
# Since: 2.4
##
{ 'struct': 'BlockDirtyBitmapAdd',
'data': { 'node': 'str', 'name': 'str', '*granularity': 'uint32',
'*persistent': 'bool', '*autoload': 'bool' } }
##
# @block-dirty-bitmap-add:
#
# Create a dirty bitmap with a name on the node, and start tracking the writes.
#
# Returns: nothing on success
# If @node is not a valid block device or node, DeviceNotFound
# If @name is already taken, GenericError with an explanation
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-add",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-add',
'data': 'BlockDirtyBitmapAdd' }
##
# @block-dirty-bitmap-remove:
#
# Stop write tracking and remove the dirty bitmap that was created
# with block-dirty-bitmap-add. If the bitmap is persistent, remove it from its
# storage too.
#
# Returns: nothing on success
# If @node is not a valid block device or node, DeviceNotFound
# If @name is not found, GenericError with an explanation
# If @name is frozen by an operation, GenericError
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-remove",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-remove',
'data': 'BlockDirtyBitmap' }
##
# @block-dirty-bitmap-clear:
#
# Clear (reset) a dirty bitmap on the device, so that an incremental
# backup from this point in time forward will only backup clusters
# modified after this clear operation.
#
# Returns: nothing on success
# If @node is not a valid block device, DeviceNotFound
# If @name is not found, GenericError with an explanation
#
# Since: 2.4
#
# Example:
#
# -> { "execute": "block-dirty-bitmap-clear",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": {} }
#
##
{ 'command': 'block-dirty-bitmap-clear',
'data': 'BlockDirtyBitmap' }
##
# @BlockDirtyBitmapSha256:
#
# SHA256 hash of dirty bitmap data
#
# @sha256: ASCII representation of SHA256 bitmap hash
#
# Since: 2.10
##
{ 'struct': 'BlockDirtyBitmapSha256',
'data': {'sha256': 'str'} }
##
# @x-debug-block-dirty-bitmap-sha256:
#
# Get bitmap SHA256
#
# Returns: BlockDirtyBitmapSha256 on success
# If @node is not a valid block device, DeviceNotFound
# If @name is not found or if hashing has failed, GenericError with an
# explanation
#
# Since: 2.10
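#
# Example (illustrative only; the node, bitmap name and the truncated hash
# value are hypothetical):
#
# -> { "execute": "x-debug-block-dirty-bitmap-sha256",
# "arguments": { "node": "drive0", "name": "bitmap0" } }
# <- { "return": { "sha256": "0e53f5..." } }
#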
##
{ 'command': 'x-debug-block-dirty-bitmap-sha256',
'data': 'BlockDirtyBitmap', 'returns': 'BlockDirtyBitmapSha256' }
##
# @blockdev-mirror:
#
# Start mirroring a block device's writes to a new destination.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: The device name or node-name of a root node whose writes should be
# mirrored.
#
# @target: the id or node-name of the block device to mirror to. This mustn't be
# attached to a guest.
#
# @replaces: with sync=full graph node name to be replaced by the new
# image when a whole image copy is done. This can be used to repair
# broken Quorum files.
#
# @speed: the maximum speed, in bytes per second
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, or
# only new I/O).
#
# @granularity: granularity of the dirty bitmap, default is 64K
# if the image format doesn't have clusters, 4K if the clusters
# are smaller than that, else the cluster size. Must be a
# power of 2 between 512 and 64M
#
# @buf-size: maximum amount of data in flight from source to
# target
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @filter-node-name: the node name that should be assigned to the
# filter driver that the mirror job inserts into the graph
# above @device. If this option is not given, a node name is
# autogenerated. (Since: 2.9)
#
# Returns: nothing on success.
#
# Since: 2.6
#
# Example:
#
# -> { "execute": "blockdev-mirror",
# "arguments": { "device": "ide-hd0",
# "target": "target0",
# "sync": "full" } }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-mirror',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'*replaces': 'str',
'sync': 'MirrorSyncMode',
'*speed': 'int', '*granularity': 'uint32',
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*filter-node-name': 'str' } }
##
# @block_set_io_throttle:
#
# Change I/O throttle limits for a block drive.
#
# Since QEMU 2.4, each device with I/O limits is member of a throttle
# group.
#
# If two or more devices are members of the same group, the limits
# will apply to the combined I/O of the whole group in a round-robin
# fashion. Therefore, setting new I/O limits to a device will affect
# the whole group.
#
# The name of the group can be specified using the 'group' parameter.
# If the parameter is unset, it is assumed to be the current group of
# that device. If it's not in any group yet, the name of the device
# will be used as the name for its group.
#
# The 'group' parameter can also be used to move a device to a
# different group. In this case the limits specified in the parameters
# will be applied to the new group only.
#
# I/O limits can be disabled by setting all of them to 0. In this case
# the device will be removed from its group and the rest of its
# members will not be affected. The 'group' parameter is ignored.
#
# Returns: Nothing on success
# If @device is not a valid block device, DeviceNotFound
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "block_set_io_throttle",
# "arguments": { "id": "virtio-blk-pci0/virtio-backend",
# "bps": 0,
# "bps_rd": 0,
# "bps_wr": 0,
# "iops": 512,
# "iops_rd": 0,
# "iops_wr": 0,
# "bps_max": 0,
# "bps_rd_max": 0,
# "bps_wr_max": 0,
# "iops_max": 0,
# "iops_rd_max": 0,
# "iops_wr_max": 0,
# "bps_max_length": 0,
# "iops_size": 0 } }
# <- { "return": {} }
#
# -> { "execute": "block_set_io_throttle",
# "arguments": { "id": "ide0-1-0",
# "bps": 1000000,
# "bps_rd": 0,
# "bps_wr": 0,
# "iops": 0,
# "iops_rd": 0,
# "iops_wr": 0,
# "bps_max": 8000000,
# "bps_rd_max": 0,
# "bps_wr_max": 0,
# "iops_max": 0,
# "iops_rd_max": 0,
# "iops_wr_max": 0,
# "bps_max_length": 60,
# "iops_size": 0 } }
# <- { "return": {} }
##
{ 'command': 'block_set_io_throttle', 'boxed': true,
'data': 'BlockIOThrottle' }
##
# @BlockIOThrottle:
#
# A set of parameters describing block throttling.
#
# @device: Block device name (deprecated, use @id instead)
#
# @id: The name or QOM path of the guest device (since: 2.8)
#
# @bps: total throughput limit in bytes per second
#
# @bps_rd: read throughput limit in bytes per second
#
# @bps_wr: write throughput limit in bytes per second
#
# @iops: total I/O operations per second
#
# @iops_rd: read I/O operations per second
#
# @iops_wr: write I/O operations per second
#
# @bps_max: total throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_rd_max: read throughput limit during bursts,
# in bytes (Since 1.7)
#
# @bps_wr_max: write throughput limit during bursts,
# in bytes (Since 1.7)
#
# @iops_max: total I/O operations per second during bursts,
# in bytes (Since 1.7)
#
# @iops_rd_max: read I/O operations per second during bursts,
# in bytes (Since 1.7)
#
# @iops_wr_max: write I/O operations per second during bursts,
# in bytes (Since 1.7)
#
# @bps_max_length: maximum length of the @bps_max burst
# period, in seconds. It must only
# be set if @bps_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @bps_rd_max_length: maximum length of the @bps_rd_max
# burst period, in seconds. It must only
# be set if @bps_rd_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @bps_wr_max_length: maximum length of the @bps_wr_max
# burst period, in seconds. It must only
# be set if @bps_wr_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_max_length: maximum length of the @iops burst
# period, in seconds. It must only
# be set if @iops_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_rd_max_length: maximum length of the @iops_rd_max
# burst period, in seconds. It must only
# be set if @iops_rd_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_wr_max_length: maximum length of the @iops_wr_max
# burst period, in seconds. It must only
# be set if @iops_wr_max is set as well.
# Defaults to 1. (Since 2.6)
#
# @iops_size: an I/O size in bytes (Since 1.7)
#
# @group: throttle group name (Since 2.4)
#
# Since: 1.1
##
{ 'struct': 'BlockIOThrottle',
'data': { '*device': 'str', '*id': 'str', 'bps': 'int', 'bps_rd': 'int',
'bps_wr': 'int', 'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int',
'*bps_max': 'int', '*bps_rd_max': 'int',
'*bps_wr_max': 'int', '*iops_max': 'int',
'*iops_rd_max': 'int', '*iops_wr_max': 'int',
'*bps_max_length': 'int', '*bps_rd_max_length': 'int',
'*bps_wr_max_length': 'int', '*iops_max_length': 'int',
'*iops_rd_max_length': 'int', '*iops_wr_max_length': 'int',
'*iops_size': 'int', '*group': 'str' } }
##
# @ThrottleLimits:
#
# Limit parameters for throttling.
# Since some limit combinations are illegal, limits should always be set in one
# transaction. All fields are optional. When setting limits, if a field is
# missing the current value is not changed.
#
# @iops-total: limit total I/O operations per second
# @iops-total-max: I/O operations burst
# @iops-total-max-length: length of the iops-total-max burst period, in seconds
# It must only be set if @iops-total-max is set as well.
# @iops-read: limit read operations per second
# @iops-read-max: I/O operations read burst
# @iops-read-max-length: length of the iops-read-max burst period, in seconds
# It must only be set if @iops-read-max is set as well.
# @iops-write: limit write operations per second
# @iops-write-max: I/O operations write burst
# @iops-write-max-length: length of the iops-write-max burst period, in seconds
# It must only be set if @iops-write-max is set as well.
# @bps-total: limit total bytes per second
# @bps-total-max: total bytes burst
# @bps-total-max-length: length of the bps-total-max burst period, in seconds.
# It must only be set if @bps-total-max is set as well.
# @bps-read: limit read bytes per second
# @bps-read-max: total bytes read burst
# @bps-read-max-length: length of the bps-read-max burst period, in seconds
# It must only be set if @bps-read-max is set as well.
# @bps-write: limit write bytes per second
# @bps-write-max: total bytes write burst
# @bps-write-max-length: length of the bps-write-max burst period, in seconds
# It must only be set if @bps-write-max is set as well.
# @iops-size: when limiting by iops max size of an I/O in bytes
#
# Since: 2.11
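#
# Example (an illustrative sketch, assuming a throttle-group object named
# "group0" already exists; the limit values are arbitrary):
#
# -> { "execute": "qom-set",
# "arguments": { "path": "group0",
# "property": "limits",
# "value": { "iops-total": 1000, "bps-write": 52428800 } } }
# <- { "return": {} }
#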
##
{ 'struct': 'ThrottleLimits',
'data': { '*iops-total' : 'int', '*iops-total-max' : 'int',
'*iops-total-max-length' : 'int', '*iops-read' : 'int',
'*iops-read-max' : 'int', '*iops-read-max-length' : 'int',
'*iops-write' : 'int', '*iops-write-max' : 'int',
'*iops-write-max-length' : 'int', '*bps-total' : 'int',
'*bps-total-max' : 'int', '*bps-total-max-length' : 'int',
'*bps-read' : 'int', '*bps-read-max' : 'int',
'*bps-read-max-length' : 'int', '*bps-write' : 'int',
'*bps-write-max' : 'int', '*bps-write-max-length' : 'int',
'*iops-size' : 'int' } }
##
# @block-stream:
#
# Copy data from a backing file into a block device.
#
# The block streaming operation is performed in the background until the entire
# backing file has been copied. This command returns immediately once streaming
# has started. The status of ongoing block streaming operations can be checked
# with query-block-jobs. The operation can be stopped before it has completed
# using the block-job-cancel command.
#
# The node that receives the data is called the top image; it can be located in
# any part of the chain (but always above the base image; see below) and can be
# specified using its device or node name. Earlier qemu versions only allowed
# 'device' to name the top level node; presence of the 'base-node' parameter
# during introspection can be used as a witness of the enhanced semantics
# of 'device'.
#
# If a base file is specified then sectors are not copied from that base file and
# its backing chain. When streaming completes the image file will have the base
# file as its backing file. This can be used to stream a subset of the backing
# file chain instead of flattening the entire image.
#
# On successful completion the image file is updated to drop the backing file
# and the BLOCK_JOB_COMPLETED event is emitted.
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device or node name of the top image
#
# @base: the common backing file name.
# It cannot be set if @base-node is also set.
#
# @base-node: the node name of the backing file.
# It cannot be set if @base is also set. (Since 2.8)
#
# @backing-file: The backing file string to write into the top
# image. This filename is not validated.
#
# If a pathname string is such that it cannot be
# resolved by QEMU, that means that subsequent QMP or
# HMP commands must use node-names for the image in
# question, as filename lookup methods will fail.
#
# If not specified, QEMU will automatically determine
# the backing file string to use, or error out if there
# is no obvious choice. Care should be taken when
# specifying the string, to specify a valid filename or
# protocol.
# (Since 2.1)
#
# @speed: the maximum speed, in bytes per second
#
# @on-error: the action to take on an error (default report).
# 'stop' and 'enospc' can only be used if the block device
# supports io-status (see BlockInfo). Since 1.3.
#
# Returns: Nothing on success. If @device does not exist, DeviceNotFound.
#
# Since: 1.1
#
# Example:
#
# -> { "execute": "block-stream",
# "arguments": { "device": "virtio0",
# "base": "/tmp/master.qcow2" } }
# <- { "return": {} }
#
##
{ 'command': 'block-stream',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str',
'*base-node': 'str', '*backing-file': 'str', '*speed': 'int',
'*on-error': 'BlockdevOnError' } }
##
# @block-job-set-speed:
#
# Set maximum speed for a background block operation.
#
# This command can only be issued when there is an active block job.
#
# Throttling can be disabled by setting the speed to 0.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# @speed: the maximum speed, in bytes per second, or 0 for unlimited.
# Defaults to 0.
#
# Returns: Nothing on success
# If no background operation is active on this device, DeviceNotActive
#
# Since: 1.1
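#
# Example (a minimal sketch; the job identifier "job0" and the 30 MB/s limit
# are hypothetical):
#
# -> { "execute": "block-job-set-speed",
# "arguments": { "device": "job0", "speed": 31457280 } }
# <- { "return": {} }
#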
##
{ 'command': 'block-job-set-speed',
'data': { 'device': 'str', 'speed': 'int' } }
##
# @block-job-cancel:
#
# Stop an active background block operation.
#
# This command returns immediately after marking the active background block
# operation for cancellation. It is an error to call this command if no
# operation is in progress.
#
# The operation will cancel as soon as possible and then emit the
# BLOCK_JOB_CANCELLED event. Before that happens the job is still visible when
# enumerated using query-block-jobs.
#
# Note that if you issue 'block-job-cancel' after 'drive-mirror' has indicated
# (via the event BLOCK_JOB_READY) that the source and destination are
# synchronized, then the event triggered by this command changes to
# BLOCK_JOB_COMPLETED, to indicate that the mirroring has ended and the
# destination now has a point-in-time copy tied to the time of the cancellation.
#
# For streaming, the image file retains its backing file unless the streaming
# operation happens to complete just as it is being cancelled. A new streaming
# operation can be started at a later time to finish copying all data from the
# backing file.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# @force: If true, and the job has already emitted the event BLOCK_JOB_READY,
# abandon the job immediately (even if it is paused) instead of waiting
# for the destination to complete its final synchronization (since 1.3)
#
# Returns: Nothing on success
# If no background operation is active on this device, DeviceNotActive
#
# Since: 1.1
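#
# Example (a minimal sketch; the job identifier "job0" is hypothetical):
#
# -> { "execute": "block-job-cancel",
# "arguments": { "device": "job0" } }
# <- { "return": {} }
#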
##
{ 'command': 'block-job-cancel', 'data': { 'device': 'str', '*force': 'bool' } }
##
# @block-job-pause:
#
# Pause an active background block operation.
#
# This command returns immediately after marking the active background block
# operation for pausing. It is an error to call this command if no
# operation is in progress or if the job is already paused.
#
# The operation will pause as soon as possible. No event is emitted when
# the operation is actually paused. Cancelling a paused job automatically
# resumes it.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# Returns: Nothing on success
# If no background operation is active on this device, DeviceNotActive
#
# Since: 1.3
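#
# Example (illustrative; the job identifier "job0" is hypothetical):
#
# -> { "execute": "block-job-pause",
# "arguments": { "device": "job0" } }
# <- { "return": {} }
#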
##
{ 'command': 'block-job-pause', 'data': { 'device': 'str' } }
##
# @block-job-resume:
#
# Resume an active background block operation.
#
# This command returns immediately after resuming a paused background block
# operation. It is an error to call this command if no operation is in
# progress or if the job is not paused.
#
# This command also clears the error status of the job.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# Returns: Nothing on success
# If no background operation is active on this device, DeviceNotActive
#
# Since: 1.3
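#
# Example:
#
# A possible exchange, assuming a paused job with the illustrative
# identifier "job0":
#
# -> { "execute": "block-job-resume", "arguments": { "device": "job0" } }
# <- { "return": {} }
#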
##
{ 'command': 'block-job-resume', 'data': { 'device': 'str' } }
##
# @block-job-complete:
#
# Manually trigger completion of an active background block operation. This
# is supported for drive mirroring, where it also switches the device to
# write to the target path only. The ability to complete is signaled with
# a BLOCK_JOB_READY event.
#
# This command completes an active background block operation synchronously.
# The ordering of this command's return with the BLOCK_JOB_COMPLETED event
# is not defined. Note that if an I/O error occurs during the processing of
# this command: 1) the command itself will fail; 2) the error will be processed
# according to the rerror/werror arguments that were specified when starting
# the operation.
#
# A cancelled or paused job cannot be completed.
#
# @device: The job identifier. This used to be a device name (hence
# the name of the parameter), but since QEMU 2.7 it can have
# other values.
#
# Returns: Nothing on success
# If no background operation is active on this device, DeviceNotActive
#
# Since: 1.3
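#
# Example:
#
# A possible exchange, assuming a mirror job with the illustrative
# identifier "job0" that has emitted BLOCK_JOB_READY:
#
# -> { "execute": "block-job-complete", "arguments": { "device": "job0" } }
# <- { "return": {} }
#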
##
{ 'command': 'block-job-complete', 'data': { 'device': 'str' } }
##
# @block-job-dismiss:
#
# For jobs that have already concluded, remove them from the block-job-query
# list. This command only needs to be run for jobs which were started with
# QEMU 2.12+ job lifetime management semantics.
#
# This command will refuse to operate on any job that has not yet reached
# its terminal state, BLOCK_JOB_STATUS_CONCLUDED. For jobs that make use of the
# BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need
# to be used as appropriate.
#
# @id: The job identifier.
#
# Returns: Nothing on success
#
# Since: 2.12
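#
# Example:
#
# A possible exchange, assuming a concluded job with the illustrative
# identifier "job0":
#
# -> { "execute": "block-job-dismiss", "arguments": { "id": "job0" } }
# <- { "return": {} }
#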
##
{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' } }
##
# @block-job-finalize:
#
# Once a job that has manual=true reaches the pending state, it can be
# instructed to finalize any graph changes and do any necessary cleanup
# via this command.
# For jobs in a transaction, instructing one job to finalize will force
# ALL jobs in the transaction to finalize, so it is only necessary to instruct
# a single member job to finalize.
#
# @id: The job identifier.
#
# Returns: Nothing on success
#
# Since: 2.12
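#
# Example:
#
# A possible exchange, assuming a job with the illustrative identifier
# "job0" that has reached the pending state:
#
# -> { "execute": "block-job-finalize", "arguments": { "id": "job0" } }
# <- { "return": {} }
#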
##
{ 'command': 'block-job-finalize', 'data': { 'id': 'str' } }
##
# @BlockdevDiscardOptions:
#
# Determines how to handle discard requests.
#
# @ignore: Ignore the request
# @unmap: Forward as an unmap request
#
# Since: 2.9
##
{ 'enum': 'BlockdevDiscardOptions',
'data': [ 'ignore', 'unmap' ] }
##
# @BlockdevDetectZeroesOptions:
#
# Describes the operation mode for the automatic conversion of plain
# zero writes by the OS to driver specific optimized zero write commands.
#
# @off: Disabled (default)
# @on: Enabled
# @unmap: Enabled and even try to unmap blocks if possible. This also
#         requires that @BlockdevDiscardOptions is set to unmap for this
#         device.
#
# Since: 2.1
##
{ 'enum': 'BlockdevDetectZeroesOptions',
'data': [ 'off', 'on', 'unmap' ] }
##
# @BlockdevAioOptions:
#
# Selects the AIO backend to handle I/O requests
#
# @threads: Use qemu's thread pool
# @native: Use native AIO backend (only Linux and Windows)
#
# Since: 2.9
##
{ 'enum': 'BlockdevAioOptions',
'data': [ 'threads', 'native' ] }
##
# @BlockdevCacheOptions:
#
# Includes cache-related options for block devices
#
# @direct: enables use of O_DIRECT (bypass the host page cache;
# default: false)
# @no-flush: ignore any flush requests for the device (default:
# false)
#
# Since: 2.9
##
{ 'struct': 'BlockdevCacheOptions',
'data': { '*direct': 'bool',
'*no-flush': 'bool' } }
##
# @BlockdevDriver:
#
# Drivers that are supported in block device operations.
#
# @vxhs: Since 2.10
# @throttle: Since 2.11
# @nvme: Since 2.12
# @copy-on-read: Since 2.13
#
# Since: 2.9
##
{ 'enum': 'BlockdevDriver',
'data': [ 'blkdebug', 'blkverify', 'bochs', 'cloop', 'copy-on-read',
'dmg', 'file', 'ftp', 'ftps', 'gluster', 'host_cdrom',
'host_device', 'http', 'https', 'iscsi', 'luks', 'nbd', 'nfs',
'null-aio', 'null-co', 'nvme', 'parallels', 'qcow', 'qcow2', 'qed',
'quorum', 'raw', 'rbd', 'replication', 'sheepdog', 'ssh',
'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat', 'vxhs' ] }
##
# @BlockdevOptionsFile:
#
# Driver specific block device options for the file backend.
#
# @filename: path to the image file
# @pr-manager: the id for the object that will handle persistent reservations
# for this device (default: none, forward the commands via SG_IO;
# since 2.11)
# @aio: AIO backend (default: threads) (since: 2.8)
# @locking: whether to enable file locking. If set to 'auto', only enable
# when Open File Descriptor (OFD) locking API is available
# (default: auto, since 2.10)
# @x-check-cache-dropped: whether to check that page cache was dropped on live
# migration. May cause noticeable delays if the image
# file is large, do not use in production.
# (default: off) (since: 2.13)
#
# Since: 2.9
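#
# Example:
#
# An illustrative blockdev-add use of this driver; the filename and node
# name are made up, and aio=native generally requires cache.direct=true:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "file",
#                     "node-name": "file0",
#                     "filename": "/var/lib/images/disk.img",
#                     "aio": "native",
#                     "cache": { "direct": true } } }
# <- { "return": {} }
#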
##
{ 'struct': 'BlockdevOptionsFile',
'data': { 'filename': 'str',
'*pr-manager': 'str',
'*locking': 'OnOffAuto',
'*aio': 'BlockdevAioOptions',
'*x-check-cache-dropped': 'bool' } }
##
# @BlockdevOptionsNull:
#
# Driver specific block device options for the null backend.
#
# @size: size of the device in bytes.
# @latency-ns: emulated latency (in nanoseconds) in processing
#              requests. Defaults to zero, which completes requests immediately.
# (Since 2.4)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsNull',
'data': { '*size': 'int', '*latency-ns': 'uint64' } }
##
# @BlockdevOptionsNVMe:
#
# Driver specific block device options for the NVMe backend.
#
# @device: controller address of the NVMe device.
# @namespace: namespace number of the device, starting from 1.
#
# Since: 2.12
##
{ 'struct': 'BlockdevOptionsNVMe',
'data': { 'device': 'str', 'namespace': 'int' } }
##
# @BlockdevOptionsVVFAT:
#
# Driver specific block device options for the vvfat protocol.
#
# @dir: directory to be exported as FAT image
# @fat-type: FAT type: 12, 16 or 32
# @floppy: whether to export a floppy image (true) or
# partitioned hard disk (false; default)
# @label: set the volume label, limited to 11 bytes. FAT16 and
# FAT32 traditionally have some restrictions on labels, which are
# ignored by most operating systems. Defaults to "QEMU VVFAT".
# (since 2.4)
# @rw: whether to allow write operations (default: false)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsVVFAT',
'data': { 'dir': 'str', '*fat-type': 'int', '*floppy': 'bool',
'*label': 'str', '*rw': 'bool' } }
##
# @BlockdevOptionsGenericFormat:
#
# Driver specific block device options for image formats that have no option
# besides their data source.
#
# @file: reference to or definition of the data source block device
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsGenericFormat',
'data': { 'file': 'BlockdevRef' } }
##
# @BlockdevOptionsLUKS:
#
# Driver specific block device options for LUKS.
#
# @key-secret: the ID of a QCryptoSecret object providing
# the decryption key (since 2.6). Mandatory except when
# doing a metadata-only probe of the image.
#
# Since: 2.9
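#
# Example:
#
# An illustrative blockdev-add use of this driver, assuming a secret
# object with ID "sec0" holding the passphrase has already been created:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "luks",
#                     "node-name": "luks0",
#                     "key-secret": "sec0",
#                     "file": { "driver": "file",
#                               "filename": "image.luks" } } }
# <- { "return": {} }
#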
##
{ 'struct': 'BlockdevOptionsLUKS',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*key-secret': 'str' } }
##
# @BlockdevOptionsGenericCOWFormat:
#
# Driver specific block device options for image formats that have no option
# besides their data source and an optional backing file.
#
# @backing: reference to or definition of the backing file block
# device, null disables the backing file entirely.
#            Defaults to the backing file stored in the image file.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsGenericCOWFormat',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*backing': 'BlockdevRefOrNull' } }
##
# @Qcow2OverlapCheckMode:
#
# General overlap check modes.
#
# @none: Do not perform any checks
#
# @constant: Perform only checks which can be done in constant time and
# without reading anything from disk
#
# @cached: Perform only checks which can be done without reading anything
# from disk
#
# @all: Perform all available overlap checks
#
# Since: 2.9
##
{ 'enum': 'Qcow2OverlapCheckMode',
'data': [ 'none', 'constant', 'cached', 'all' ] }
##
# @Qcow2OverlapCheckFlags:
#
# Structure of flags for each metadata structure. Setting a field to 'true'
# makes qemu guard that structure against unintended overwriting. The default
# value is chosen according to the template given.
#
# @template: Specifies a template mode which can be adjusted using the other
# flags, defaults to 'cached'
#
# Since: 2.9
##
{ 'struct': 'Qcow2OverlapCheckFlags',
'data': { '*template': 'Qcow2OverlapCheckMode',
'*main-header': 'bool',
'*active-l1': 'bool',
'*active-l2': 'bool',
'*refcount-table': 'bool',
'*refcount-block': 'bool',
'*snapshot-table': 'bool',
'*inactive-l1': 'bool',
'*inactive-l2': 'bool' } }
##
# @Qcow2OverlapChecks:
#
# Specifies which metadata structures should be guarded against unintended
# overwriting.
#
# @flags: set of flags for separate specification of each metadata structure
# type
#
# @mode: named mode which chooses a specific set of flags
#
# Since: 2.9
##
{ 'alternate': 'Qcow2OverlapChecks',
'data': { 'flags': 'Qcow2OverlapCheckFlags',
'mode': 'Qcow2OverlapCheckMode' } }
##
# @BlockdevQcowEncryptionFormat:
#
# @aes: AES-CBC with plain64 initialization vectors
#
# Since: 2.10
##
{ 'enum': 'BlockdevQcowEncryptionFormat',
'data': [ 'aes' ] }
##
# @BlockdevQcowEncryption:
#
# Since: 2.10
##
{ 'union': 'BlockdevQcowEncryption',
'base': { 'format': 'BlockdevQcowEncryptionFormat' },
'discriminator': 'format',
'data': { 'aes': 'QCryptoBlockOptionsQCow' } }
##
# @BlockdevOptionsQcow:
#
# Driver specific block device options for qcow.
#
# @encrypt: Image decryption options. Mandatory for
# encrypted images, except when doing a metadata-only
# probe of the image.
#
# Since: 2.10
##
{ 'struct': 'BlockdevOptionsQcow',
'base': 'BlockdevOptionsGenericCOWFormat',
'data': { '*encrypt': 'BlockdevQcowEncryption' } }
##
# @BlockdevQcow2EncryptionFormat:
#
# @aes: AES-CBC with plain64 initialization vectors
#
# Since: 2.10
##
{ 'enum': 'BlockdevQcow2EncryptionFormat',
'data': [ 'aes', 'luks' ] }
##
# @BlockdevQcow2Encryption:
#
# Since: 2.10
##
{ 'union': 'BlockdevQcow2Encryption',
'base': { 'format': 'BlockdevQcow2EncryptionFormat' },
'discriminator': 'format',
'data': { 'aes': 'QCryptoBlockOptionsQCow',
'luks': 'QCryptoBlockOptionsLUKS'} }
##
# @BlockdevOptionsQcow2:
#
# Driver specific block device options for qcow2.
#
# @lazy-refcounts: whether to enable the lazy refcounts
# feature (default is taken from the image file)
#
# @pass-discard-request: whether discard requests to the qcow2
# device should be forwarded to the data source
#
# @pass-discard-snapshot: whether discard requests for the data source
# should be issued when a snapshot operation (e.g.
# deleting a snapshot) frees clusters in the qcow2 file
#
# @pass-discard-other: whether discard requests for the data source
# should be issued on other occasions where a cluster
# gets freed
#
# @overlap-check: which overlap checks to perform for writes
# to the image, defaults to 'cached' (since 2.2)
#
# @cache-size: the maximum total size of the L2 table and
# refcount block caches in bytes (since 2.2)
#
# @l2-cache-size: the maximum size of the L2 table cache in
# bytes (since 2.2)
#
# @l2-cache-entry-size: the size of each entry in the L2 cache in
# bytes. It must be a power of two between 512
# and the cluster size. The default value is
# the cluster size (since 2.12)
#
# @refcount-cache-size: the maximum size of the refcount block cache
# in bytes (since 2.2)
#
# @cache-clean-interval: clean unused entries in the L2 and refcount
# caches. The interval is in seconds. The default value
# is 0 and it disables this feature (since 2.5)
# @encrypt: Image decryption options. Mandatory for
# encrypted images, except when doing a metadata-only
# probe of the image. (since 2.10)
#
# Since: 2.9
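#
# Example:
#
# An illustrative blockdev-add use of this driver for a LUKS-encrypted
# qcow2 image; the node name, filename and the secret object "sec0" are
# assumptions:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "qcow2",
#                     "node-name": "disk0",
#                     "lazy-refcounts": true,
#                     "encrypt": { "format": "luks", "key-secret": "sec0" },
#                     "file": { "driver": "file",
#                               "filename": "encrypted.qcow2" } } }
# <- { "return": {} }
#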
##
{ 'struct': 'BlockdevOptionsQcow2',
'base': 'BlockdevOptionsGenericCOWFormat',
'data': { '*lazy-refcounts': 'bool',
'*pass-discard-request': 'bool',
'*pass-discard-snapshot': 'bool',
'*pass-discard-other': 'bool',
'*overlap-check': 'Qcow2OverlapChecks',
'*cache-size': 'int',
'*l2-cache-size': 'int',
'*l2-cache-entry-size': 'int',
'*refcount-cache-size': 'int',
'*cache-clean-interval': 'int',
'*encrypt': 'BlockdevQcow2Encryption' } }
##
# @SshHostKeyCheckMode:
#
# @none: Don't check the host key at all
# @hash: Compare the host key with a given hash
# @known_hosts: Check the host key against the known_hosts file
#
# Since: 2.12
##
{ 'enum': 'SshHostKeyCheckMode',
'data': [ 'none', 'hash', 'known_hosts' ] }
##
# @SshHostKeyCheckHashType:
#
# @md5: The given hash is an md5 hash
# @sha1: The given hash is an sha1 hash
#
# Since: 2.12
##
{ 'enum': 'SshHostKeyCheckHashType',
'data': [ 'md5', 'sha1' ] }
##
# @SshHostKeyHash:
#
# @type: The hash algorithm used for the hash
# @hash: The expected hash value
#
# Since: 2.12
##
{ 'struct': 'SshHostKeyHash',
'data': { 'type': 'SshHostKeyCheckHashType',
'hash': 'str' }}
##
# @SshHostKeyDummy:
#
# For those union branches that don't need additional fields.
#
# Since: 2.12
##
{ 'struct': 'SshHostKeyDummy',
'data': {} }
##
# @SshHostKeyCheck:
#
# Since: 2.12
##
{ 'union': 'SshHostKeyCheck',
'base': { 'mode': 'SshHostKeyCheckMode' },
'discriminator': 'mode',
'data': { 'none': 'SshHostKeyDummy',
'hash': 'SshHostKeyHash',
'known_hosts': 'SshHostKeyDummy' } }
##
# @BlockdevOptionsSsh:
#
# @server: host address
#
# @path: path to the image on the host
#
# @user: user as which to connect, defaults to current
# local user name
#
# @host-key-check: Defines how and what to check the host key against
# (default: known_hosts)
#
# Since: 2.9
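#
# Example:
#
# An illustrative blockdev-add use of this driver; the host, path and
# user are made-up values:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "ssh",
#                     "node-name": "ssh0",
#                     "server": { "host": "storage.example.com",
#                                 "port": "22" },
#                     "path": "/images/disk.img",
#                     "user": "backup" } }
# <- { "return": {} }
#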
##
{ 'struct': 'BlockdevOptionsSsh',
'data': { 'server': 'InetSocketAddress',
'path': 'str',
'*user': 'str',
'*host-key-check': 'SshHostKeyCheck' } }
##
# @BlkdebugEvent:
#
# Trigger events supported by blkdebug.
#
# @l1_shrink_write_table: write zeros to the l1 table to shrink image.
# (since 2.11)
#
# @l1_shrink_free_l2_clusters: discard the l2 tables. (since 2.11)
#
# @cor_write: a write due to copy-on-read (since 2.11)
#
# Since: 2.9
##
{ 'enum': 'BlkdebugEvent', 'prefix': 'BLKDBG',
'data': [ 'l1_update', 'l1_grow_alloc_table', 'l1_grow_write_table',
'l1_grow_activate_table', 'l2_load', 'l2_update',
'l2_update_compressed', 'l2_alloc_cow_read', 'l2_alloc_write',
'read_aio', 'read_backing_aio', 'read_compressed', 'write_aio',
'write_compressed', 'vmstate_load', 'vmstate_save', 'cow_read',
'cow_write', 'reftable_load', 'reftable_grow', 'reftable_update',
'refblock_load', 'refblock_update', 'refblock_update_part',
'refblock_alloc', 'refblock_alloc_hookup', 'refblock_alloc_write',
'refblock_alloc_write_blocks', 'refblock_alloc_write_table',
'refblock_alloc_switch_table', 'cluster_alloc',
'cluster_alloc_bytes', 'cluster_free', 'flush_to_os',
'flush_to_disk', 'pwritev_rmw_head', 'pwritev_rmw_after_head',
'pwritev_rmw_tail', 'pwritev_rmw_after_tail', 'pwritev',
'pwritev_zero', 'pwritev_done', 'empty_image_prepare',
'l1_shrink_write_table', 'l1_shrink_free_l2_clusters',
'cor_write'] }
##
# @BlkdebugInjectErrorOptions:
#
# Describes a single error injection for blkdebug.
#
# @event: trigger event
#
# @state: the state identifier blkdebug needs to be in to
# actually trigger the event; defaults to "any"
#
# @errno: error identifier (errno) to be returned; defaults to
# EIO
#
# @sector: specifies the sector index which has to be affected
# in order to actually trigger the event; defaults to "any
# sector"
#
# @once: disables further events after this one has been
# triggered; defaults to false
#
# @immediately: fail immediately; defaults to false
#
# Since: 2.9
##
{ 'struct': 'BlkdebugInjectErrorOptions',
'data': { 'event': 'BlkdebugEvent',
'*state': 'int',
'*errno': 'int',
'*sector': 'int',
'*once': 'bool',
'*immediately': 'bool' } }
##
# @BlkdebugSetStateOptions:
#
# Describes a single state-change event for blkdebug.
#
# @event: trigger event
#
# @state: the current state identifier blkdebug needs to be in;
# defaults to "any"
#
# @new_state: the state identifier blkdebug is supposed to assume if
# this event is triggered
#
# Since: 2.9
##
{ 'struct': 'BlkdebugSetStateOptions',
'data': { 'event': 'BlkdebugEvent',
'*state': 'int',
'new_state': 'int' } }
##
# @BlockdevOptionsBlkdebug:
#
# Driver specific block device options for blkdebug.
#
# @image: underlying raw block device (or image file)
#
# @config: filename of the configuration file
#
# @align: required alignment for requests in bytes, must be
# positive power of 2, or 0 for default
#
# @max-transfer: maximum size for I/O transfers in bytes, must be
# positive multiple of @align and of the underlying
# file's request alignment (but need not be a power of
# 2), or 0 for default (since 2.10)
#
# @opt-write-zero: preferred alignment for write zero requests in bytes,
# must be positive multiple of @align and of the
# underlying file's request alignment (but need not be a
# power of 2), or 0 for default (since 2.10)
#
# @max-write-zero: maximum size for write zero requests in bytes, must be
# positive multiple of @align, of @opt-write-zero, and of
# the underlying file's request alignment (but need not
# be a power of 2), or 0 for default (since 2.10)
#
# @opt-discard: preferred alignment for discard requests in bytes, must
# be positive multiple of @align and of the underlying
# file's request alignment (but need not be a power of
# 2), or 0 for default (since 2.10)
#
# @max-discard: maximum size for discard requests in bytes, must be
# positive multiple of @align, of @opt-discard, and of
# the underlying file's request alignment (but need not
# be a power of 2), or 0 for default (since 2.10)
#
# @inject-error: array of error injection descriptions
#
# @set-state: array of state-change descriptions
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsBlkdebug',
'data': { 'image': 'BlockdevRef',
'*config': 'str',
'*align': 'int', '*max-transfer': 'int32',
'*opt-write-zero': 'int32', '*max-write-zero': 'int32',
'*opt-discard': 'int32', '*max-discard': 'int32',
'*inject-error': ['BlkdebugInjectErrorOptions'],
'*set-state': ['BlkdebugSetStateOptions'] } }
##
# @BlockdevOptionsBlkverify:
#
# Driver specific block device options for blkverify.
#
# @test: block device to be tested
#
# @raw: raw image used for verification
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsBlkverify',
'data': { 'test': 'BlockdevRef',
'raw': 'BlockdevRef' } }
##
# @QuorumReadPattern:
#
# An enumeration of quorum read patterns.
#
# @quorum: read all the children and do a quorum vote on reads
#
# @fifo: read only from the first child that has not failed
#
# Since: 2.9
##
{ 'enum': 'QuorumReadPattern', 'data': [ 'quorum', 'fifo' ] }
##
# @BlockdevOptionsQuorum:
#
# Driver specific block device options for Quorum
#
# @blkverify: true if the driver must print content mismatches
#             (default: false)
#
# @children: the children block devices to use
#
# @vote-threshold: the vote limit under which a read will fail
#
# @rewrite-corrupted: rewrite corrupted data when quorum is reached
# (Since 2.1)
#
# @read-pattern: choose read pattern and set to quorum by default
# (Since 2.2)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsQuorum',
'data': { '*blkverify': 'bool',
'children': [ 'BlockdevRef' ],
'vote-threshold': 'int',
'*rewrite-corrupted': 'bool',
'*read-pattern': 'QuorumReadPattern' } }
##
# @BlockdevOptionsGluster:
#
# Driver specific block device options for Gluster
#
# @volume: name of gluster volume where VM image resides
#
# @path: absolute path to image file in gluster volume
#
# @server: gluster servers description
#
# @debug: libgfapi log level (default '4' which is Error)
# (Since 2.8)
#
# @logfile: libgfapi log file (default /dev/stderr) (Since 2.8)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsGluster',
'data': { 'volume': 'str',
'path': 'str',
'server': ['SocketAddress'],
'*debug': 'int',
'*logfile': 'str' } }
##
# @IscsiTransport:
#
# An enumeration of libiscsi transport types
#
# Since: 2.9
##
{ 'enum': 'IscsiTransport',
'data': [ 'tcp', 'iser' ] }
##
# @IscsiHeaderDigest:
#
# An enumeration of header digests supported by libiscsi
#
# Since: 2.9
##
{ 'enum': 'IscsiHeaderDigest',
'prefix': 'QAPI_ISCSI_HEADER_DIGEST',
'data': [ 'crc32c', 'none', 'crc32c-none', 'none-crc32c' ] }
##
# @BlockdevOptionsIscsi:
#
# Driver specific block device options for iscsi
#
# @transport: The iscsi transport type
#
# @portal: The address of the iscsi portal
#
# @target: The target iqn name
#
# @lun: LUN to connect to. Defaults to 0.
#
# @user: User name to log in with. If omitted, no CHAP
# authentication is performed.
#
# @password-secret: The ID of a QCryptoSecret object providing
# the password for the login. This option is required if
# @user is specified.
#
# @initiator-name: The iqn name we want to identify to the target
# as. If this option is not specified, an initiator name is
# generated automatically.
#
# @header-digest: The desired header digest. Defaults to
# none-crc32c.
#
# @timeout: Timeout in seconds after which a request will
# timeout. 0 means no timeout and is the default.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsIscsi',
'data': { 'transport': 'IscsiTransport',
'portal': 'str',
'target': 'str',
'*lun': 'int',
'*user': 'str',
'*password-secret': 'str',
'*initiator-name': 'str',
'*header-digest': 'IscsiHeaderDigest',
'*timeout': 'int' } }
##
# @BlockdevOptionsRbd:
#
# @pool: Ceph pool name.
#
# @image: Image name in the Ceph pool.
#
# @conf: path to Ceph configuration file. Values
# in the configuration file will be overridden by
# options specified via QAPI.
#
# @snapshot: Ceph snapshot name.
#
# @user: Ceph id name.
#
# @server: Monitor host address and port. This maps
# to the "mon_host" Ceph option.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsRbd',
'data': { 'pool': 'str',
'image': 'str',
'*conf': 'str',
'*snapshot': 'str',
'*user': 'str',
'*server': ['InetSocketAddressBase'] } }
##
# @BlockdevOptionsSheepdog:
#
# Driver specific block device options for sheepdog
#
# @vdi: Virtual disk image name
# @server: The Sheepdog server to connect to
# @snap-id: Snapshot ID
# @tag: Snapshot tag name
#
# Only one of @snap-id and @tag may be present.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsSheepdog',
'data': { 'server': 'SocketAddress',
'vdi': 'str',
'*snap-id': 'uint32',
'*tag': 'str' } }
##
# @ReplicationMode:
#
# An enumeration of replication modes.
#
# @primary: Primary mode, the vm's state will be sent to secondary QEMU.
#
# @secondary: Secondary mode, receive the vm's state from primary QEMU.
#
# Since: 2.9
##
{ 'enum' : 'ReplicationMode', 'data' : [ 'primary', 'secondary' ] }
##
# @BlockdevOptionsReplication:
#
# Driver specific block device options for replication
#
# @mode: the replication mode
#
# @top-id: In secondary mode, node name or device ID of the root
#          node that owns the replication node chain. Must not be given in
# primary mode.
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsReplication',
'base': 'BlockdevOptionsGenericFormat',
'data': { 'mode': 'ReplicationMode',
'*top-id': 'str' } }
##
# @NFSTransport:
#
# An enumeration of NFS transport types
#
# @inet: TCP transport
#
# Since: 2.9
##
{ 'enum': 'NFSTransport',
'data': [ 'inet' ] }
##
# @NFSServer:
#
# Captures the address of the socket
#
# @type: transport type used for NFS (only TCP supported)
#
# @host: host address for NFS server
#
# Since: 2.9
##
{ 'struct': 'NFSServer',
'data': { 'type': 'NFSTransport',
'host': 'str' } }
##
# @BlockdevOptionsNfs:
#
# Driver specific block device option for NFS
#
# @server: host address
#
# @path: path of the image on the host
#
# @user: UID value to use when talking to the
# server (defaults to 65534 on Windows and getuid()
# on unix)
#
# @group: GID value to use when talking to the
# server (defaults to 65534 on Windows and getgid()
#           on unix)
#
# @tcp-syn-count: number of SYNs during the session
# establishment (defaults to libnfs default)
#
# @readahead-size: set the readahead size in bytes (defaults
# to libnfs default)
#
# @page-cache-size: set the pagecache size in bytes (defaults
# to libnfs default)
#
# @debug: set the NFS debug level (max 2) (defaults
# to libnfs default)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsNfs',
'data': { 'server': 'NFSServer',
'path': 'str',
'*user': 'int',
'*group': 'int',
'*tcp-syn-count': 'int',
'*readahead-size': 'int',
'*page-cache-size': 'int',
'*debug': 'int' } }
##
# @BlockdevOptionsCurlBase:
#
# Driver specific block device options shared by all protocols supported by the
# curl backend.
#
# @url: URL of the image file
#
# @readahead: Size of the read-ahead cache; must be a multiple of
# 512 (defaults to 256 kB)
#
# @timeout: Timeout for connections, in seconds (defaults to 5)
#
# @username: Username for authentication (defaults to none)
#
# @password-secret: ID of a QCryptoSecret object providing a password
# for authentication (defaults to no password)
#
# @proxy-username: Username for proxy authentication (defaults to none)
#
# @proxy-password-secret: ID of a QCryptoSecret object providing a password
# for proxy authentication (defaults to no password)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlBase',
'data': { 'url': 'str',
'*readahead': 'int',
'*timeout': 'int',
'*username': 'str',
'*password-secret': 'str',
'*proxy-username': 'str',
'*proxy-password-secret': 'str' } }
##
# @BlockdevOptionsCurlHttp:
#
# Driver specific block device options for HTTP connections over the curl
# backend. URLs must start with "http://".
#
# @cookie: List of cookies to set; format is
# "name1=content1; name2=content2;" as explained by
# CURLOPT_COOKIE(3). Defaults to no cookies.
#
# @cookie-secret: ID of a QCryptoSecret object providing the cookie data in a
# secure way. See @cookie for the format. (since 2.10)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlHttp',
'base': 'BlockdevOptionsCurlBase',
'data': { '*cookie': 'str',
'*cookie-secret': 'str'} }
##
# @BlockdevOptionsCurlHttps:
#
# Driver specific block device options for HTTPS connections over the curl
# backend. URLs must start with "https://".
#
# @cookie: List of cookies to set; format is
# "name1=content1; name2=content2;" as explained by
# CURLOPT_COOKIE(3). Defaults to no cookies.
#
# @sslverify: Whether to verify the SSL certificate's validity (defaults to
# true)
#
# @cookie-secret: ID of a QCryptoSecret object providing the cookie data in a
# secure way. See @cookie for the format. (since 2.10)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlHttps',
'base': 'BlockdevOptionsCurlBase',
'data': { '*cookie': 'str',
'*sslverify': 'bool',
'*cookie-secret': 'str'} }
##
# @BlockdevOptionsCurlFtp:
#
# Driver specific block device options for FTP connections over the curl
# backend. URLs must start with "ftp://".
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlFtp',
'base': 'BlockdevOptionsCurlBase',
'data': { } }
##
# @BlockdevOptionsCurlFtps:
#
# Driver specific block device options for FTPS connections over the curl
# backend. URLs must start with "ftps://".
#
# @sslverify: Whether to verify the SSL certificate's validity (defaults to
# true)
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsCurlFtps',
'base': 'BlockdevOptionsCurlBase',
'data': { '*sslverify': 'bool' } }
##
# @BlockdevOptionsNbd:
#
# Driver specific block device options for NBD.
#
# @server: NBD server address
#
# @export: export name
#
# @tls-creds: TLS credentials ID
#
# Since: 2.9
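#
# Example:
#
# An illustrative blockdev-add use of this driver; the server address and
# export name are made-up values:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "nbd",
#                     "node-name": "nbd0",
#                     "server": { "type": "inet",
#                                 "host": "nbd.example.org",
#                                 "port": "10809" },
#                     "export": "drive0" } }
# <- { "return": {} }
#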
##
{ 'struct': 'BlockdevOptionsNbd',
'data': { 'server': 'SocketAddress',
'*export': 'str',
'*tls-creds': 'str' } }
##
# @BlockdevOptionsRaw:
#
# Driver specific block device options for the raw driver.
#
# @offset: position where the block device starts
# @size: the assumed size of the device
#
# Since: 2.9
##
{ 'struct': 'BlockdevOptionsRaw',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*offset': 'int', '*size': 'int' } }
##
# @BlockdevOptionsVxHS:
#
# Driver specific block device options for VxHS
#
# @vdisk-id: UUID of VxHS volume
# @server: vxhs server IP, port
# @tls-creds: TLS credentials ID
#
# Since: 2.10
##
{ 'struct': 'BlockdevOptionsVxHS',
'data': { 'vdisk-id': 'str',
'server': 'InetSocketAddressBase',
'*tls-creds': 'str' } }
##
# @BlockdevOptionsThrottle:
#
# Driver specific block device options for the throttle driver
#
# @throttle-group: the name of the throttle-group object to use. It
# must already exist.
# @file: reference to or definition of the data source block device
#
# Since: 2.11
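#
# Example:
#
# An illustrative blockdev-add use of this driver, assuming a
# throttle-group object "limits0" and a node "file0" already exist:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "throttle",
#                     "node-name": "throttle0",
#                     "throttle-group": "limits0",
#                     "file": "file0" } }
# <- { "return": {} }
#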
##
{ 'struct': 'BlockdevOptionsThrottle',
'data': { 'throttle-group': 'str',
'file' : 'BlockdevRef'
} }
##
# @BlockdevOptions:
#
# Options for creating a block device. Many options are available for all
# block devices, independent of the block driver:
#
# @driver: block driver name
# @node-name: the node name of the new node (Since 2.0).
# This option is required on the top level of blockdev-add.
# @discard: discard-related options (default: ignore)
# @cache: cache-related options
# @read-only: whether the block device should be read-only (default: false).
# Note that some block drivers support only read-only access,
# either generally or in certain configurations. In this case,
# the default value does not work and the option must be
# specified explicitly.
# @detect-zeroes: detect and optimize zero writes (Since 2.1)
# (default: off)
# @force-share: force sharing of all permissions on added nodes.
#               Requires read-only=true. (Since 2.10)
#
# Remaining options are determined by the block driver.
#
# Since: 2.9
##
{ 'union': 'BlockdevOptions',
'base': { 'driver': 'BlockdevDriver',
'*node-name': 'str',
'*discard': 'BlockdevDiscardOptions',
'*cache': 'BlockdevCacheOptions',
'*read-only': 'bool',
'*force-share': 'bool',
'*detect-zeroes': 'BlockdevDetectZeroesOptions' },
'discriminator': 'driver',
'data': {
'blkdebug': 'BlockdevOptionsBlkdebug',
'blkverify': 'BlockdevOptionsBlkverify',
'bochs': 'BlockdevOptionsGenericFormat',
'cloop': 'BlockdevOptionsGenericFormat',
'copy-on-read':'BlockdevOptionsGenericFormat',
'dmg': 'BlockdevOptionsGenericFormat',
'file': 'BlockdevOptionsFile',
'ftp': 'BlockdevOptionsCurlFtp',
'ftps': 'BlockdevOptionsCurlFtps',
'gluster': 'BlockdevOptionsGluster',
'host_cdrom': 'BlockdevOptionsFile',
'host_device':'BlockdevOptionsFile',
'http': 'BlockdevOptionsCurlHttp',
'https': 'BlockdevOptionsCurlHttps',
'iscsi': 'BlockdevOptionsIscsi',
'luks': 'BlockdevOptionsLUKS',
'nbd': 'BlockdevOptionsNbd',
'nfs': 'BlockdevOptionsNfs',
'null-aio': 'BlockdevOptionsNull',
'null-co': 'BlockdevOptionsNull',
'nvme': 'BlockdevOptionsNVMe',
'parallels': 'BlockdevOptionsGenericFormat',
'qcow2': 'BlockdevOptionsQcow2',
'qcow': 'BlockdevOptionsQcow',
'qed': 'BlockdevOptionsGenericCOWFormat',
'quorum': 'BlockdevOptionsQuorum',
'raw': 'BlockdevOptionsRaw',
'rbd': 'BlockdevOptionsRbd',
'replication':'BlockdevOptionsReplication',
'sheepdog': 'BlockdevOptionsSheepdog',
'ssh': 'BlockdevOptionsSsh',
'throttle': 'BlockdevOptionsThrottle',
'vdi': 'BlockdevOptionsGenericFormat',
'vhdx': 'BlockdevOptionsGenericFormat',
'vmdk': 'BlockdevOptionsGenericCOWFormat',
'vpc': 'BlockdevOptionsGenericFormat',
'vvfat': 'BlockdevOptionsVVFAT',
'vxhs': 'BlockdevOptionsVxHS'
} }
##
# @BlockdevRef:
#
# Reference to a block device.
#
# @definition: defines a new block device inline
# @reference: references the ID of an existing block device
#
# Since: 2.9
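#
# Example:
#
# A BlockdevRef is not used on its own; it appears as the value of
# members such as @file in @blockdev-add. As an illustration (the node
# name "proto0" is a placeholder for a previously added node), the
# 'reference' form names an existing node instead of defining one
# inline:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "qcow2",
#                     "node-name": "fmt0",
#                     "file": "proto0" } }
# <- { "return": {} }
#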
##
{ 'alternate': 'BlockdevRef',
'data': { 'definition': 'BlockdevOptions',
'reference': 'str' } }
##
# @BlockdevRefOrNull:
#
# Reference to a block device.
#
# @definition: defines a new block device inline
# @reference: references the ID of an existing block device.
# An empty string means that no block device should
# be referenced. Deprecated; use null instead.
# @null: No block device should be referenced (since 2.10)
#
# Since: 2.9
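#
# Example:
#
# A BlockdevRefOrNull appears as the value of members such as @backing
# of COW format drivers. As an illustration (node and file names are
# placeholders), the following opens a qcow2 image without any backing
# node, regardless of what its image header records:
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "qcow2",
#                     "node-name": "fmt0",
#                     "backing": null,
#                     "file": { "driver": "file",
#                               "filename": "/tmp/overlay.qcow2" } } }
# <- { "return": {} }
#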
##
{ 'alternate': 'BlockdevRefOrNull',
'data': { 'definition': 'BlockdevOptions',
'reference': 'str',
'null': 'null' } }
##
# @blockdev-add:
#
# Creates a new block device. If the @id option is given at the top level, a
# BlockBackend will be created; otherwise, @node-name is mandatory at the top
# level and no BlockBackend will be created.
#
# Since: 2.9
#
# Example:
#
# 1.
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "qcow2",
# "node-name": "test1",
# "file": {
# "driver": "file",
# "filename": "test.qcow2"
# }
# }
# }
# <- { "return": {} }
#
# 2.
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "qcow2",
# "node-name": "node0",
# "discard": "unmap",
# "cache": {
# "direct": true
# },
# "file": {
# "driver": "file",
# "filename": "/tmp/test.qcow2"
# },
# "backing": {
# "driver": "raw",
# "file": {
# "driver": "file",
# "filename": "/dev/fdset/4"
# }
# }
# }
# }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true }
##
# @blockdev-del:
#
# Deletes a block device that has been added using blockdev-add.
# The command will fail if the node is attached to a device or is
# otherwise being used.
#
# @node-name: Name of the graph node to delete.
#
# Since: 2.9
#
# Example:
#
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "qcow2",
# "node-name": "node0",
# "file": {
# "driver": "file",
# "filename": "test.qcow2"
# }
# }
# }
# <- { "return": {} }
#
# -> { "execute": "blockdev-del",
# "arguments": { "node-name": "node0" }
# }
# <- { "return": {} }
#
##
{ 'command': 'blockdev-del', 'data': { 'node-name': 'str' } }
##
# @BlockdevCreateOptionsFile:
#
# Driver specific image creation options for file.
#
# @filename Filename for the new image file
# @size Size of the virtual disk in bytes
# @preallocation Preallocation mode for the new image (default: off)
# @nocow Turn off copy-on-write (valid only on btrfs; default: off)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsFile',
'data': { 'filename': 'str',
'size': 'size',
'*preallocation': 'PreallocMode',
'*nocow': 'bool' } }
##
# @BlockdevCreateOptionsGluster:
#
# Driver specific image creation options for gluster.
#
# @location Where to store the new image file
# @size Size of the virtual disk in bytes
# @preallocation Preallocation mode for the new image (default: off)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsGluster',
'data': { 'location': 'BlockdevOptionsGluster',
'size': 'size',
'*preallocation': 'PreallocMode' } }
##
# @BlockdevCreateOptionsLUKS:
#
# Driver specific image creation options for LUKS.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
#
# Since: 2.12
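#
# Example:
#
# A minimal sketch only; it assumes a 'secret' object named "sec0" and
# a protocol node named "proto0" already exist, and that the remaining
# LUKS parameters (e.g. @key-secret) are inherited from
# QCryptoBlockCreateOptionsLUKS:
#
# -> { "execute": "x-blockdev-create",
#      "arguments": { "driver": "luks",
#                     "file": "proto0",
#                     "key-secret": "sec0",
#                     "size": 1073741824 } }
# <- { "return": {} }
#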
##
{ 'struct': 'BlockdevCreateOptionsLUKS',
'base': 'QCryptoBlockCreateOptionsLUKS',
'data': { 'file': 'BlockdevRef',
'size': 'size' } }
##
# @BlockdevCreateOptionsNfs:
#
# Driver specific image creation options for NFS.
#
# @location Where to store the new image file
# @size Size of the virtual disk in bytes
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsNfs',
'data': { 'location': 'BlockdevOptionsNfs',
'size': 'size' } }
##
# @BlockdevCreateOptionsParallels:
#
# Driver specific image creation options for parallels.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @cluster-size Cluster size in bytes (default: 1 MB)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsParallels',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*cluster-size': 'size' } }
##
# @BlockdevCreateOptionsQcow:
#
# Driver specific image creation options for qcow.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @backing-file File name of the backing file if a backing file
# should be used
# @encrypt Encryption options if the image should be encrypted
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsQcow',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*backing-file': 'str',
'*encrypt': 'QCryptoBlockCreateOptions' } }
##
# @BlockdevQcow2Version:
#
# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2)
# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3)
#
# Since: 2.12
##
{ 'enum': 'BlockdevQcow2Version',
'data': [ 'v2', 'v3' ] }
##
# @BlockdevCreateOptionsQcow2:
#
# Driver specific image creation options for qcow2.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @version Compatibility level (default: v3)
# @backing-file File name of the backing file if a backing file
# should be used
# @backing-fmt Name of the block driver to use for the backing file
# @encrypt Encryption options if the image should be encrypted
# @cluster-size qcow2 cluster size in bytes (default: 65536)
# @preallocation Preallocation mode for the new image (default: off)
# @lazy-refcounts True if refcounts may be updated lazily (default: off)
# @refcount-bits Width of reference counts in bits (default: 16)
#
# Since: 2.12
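#
# Example:
#
# For illustration only; node names, file names and sizes are
# placeholders. This formats an existing protocol node as a 10 GiB
# qcow2 overlay with an explicit backing file and 2 MiB clusters:
#
# -> { "execute": "x-blockdev-create",
#      "arguments": { "driver": "qcow2",
#                     "file": "proto0",
#                     "size": 10737418240,
#                     "backing-file": "base.qcow2",
#                     "backing-fmt": "qcow2",
#                     "cluster-size": 2097152 } }
# <- { "return": {} }
#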
##
{ 'struct': 'BlockdevCreateOptionsQcow2',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*version': 'BlockdevQcow2Version',
'*backing-file': 'str',
'*backing-fmt': 'BlockdevDriver',
'*encrypt': 'QCryptoBlockCreateOptions',
'*cluster-size': 'size',
'*preallocation': 'PreallocMode',
'*lazy-refcounts': 'bool',
'*refcount-bits': 'int' } }
##
# @BlockdevCreateOptionsQed:
#
# Driver specific image creation options for qed.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @backing-file File name of the backing file if a backing file
# should be used
# @backing-fmt Name of the block driver to use for the backing file
# @cluster-size Cluster size in bytes (default: 65536)
# @table-size L1/L2 table size (in clusters)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsQed',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*backing-file': 'str',
'*backing-fmt': 'BlockdevDriver',
'*cluster-size': 'size',
'*table-size': 'int' } }
##
# @BlockdevCreateOptionsRbd:
#
# Driver specific image creation options for rbd/Ceph.
#
# @location Where to store the new image file. This location cannot
# point to a snapshot.
# @size Size of the virtual disk in bytes
# @cluster-size RBD object size
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsRbd',
'data': { 'location': 'BlockdevOptionsRbd',
'size': 'size',
'*cluster-size' : 'size' } }
##
# @SheepdogRedundancyType:
#
# @full Create a fully replicated vdi with x copies
# @erasure-coded Create an erasure coded vdi with x data strips and
# y parity strips
#
# Since: 2.12
##
{ 'enum': 'SheepdogRedundancyType',
'data': [ 'full', 'erasure-coded' ] }
##
# @SheepdogRedundancyFull:
#
# @copies Number of copies to use (between 1 and 31)
#
# Since: 2.12
##
{ 'struct': 'SheepdogRedundancyFull',
'data': { 'copies': 'int' }}
##
# @SheepdogRedundancyErasureCoded:
#
# @data-strips Number of data strips to use (one of {2,4,8,16})
# @parity-strips Number of parity strips to use (between 1 and 15)
#
# Since: 2.12
##
{ 'struct': 'SheepdogRedundancyErasureCoded',
'data': { 'data-strips': 'int',
'parity-strips': 'int' }}
##
# @SheepdogRedundancy:
#
# Since: 2.12
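#
# Example:
#
# Illustrative values only; a SheepdogRedundancy object is used as the
# @redundancy member of BlockdevCreateOptionsSheepdog, e.g.:
#
# "redundancy": { "type": "full", "copies": 3 }
#
# or
#
# "redundancy": { "type": "erasure-coded",
#                 "data-strips": 4, "parity-strips": 2 }
#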
##
{ 'union': 'SheepdogRedundancy',
'base': { 'type': 'SheepdogRedundancyType' },
'discriminator': 'type',
'data': { 'full': 'SheepdogRedundancyFull',
'erasure-coded': 'SheepdogRedundancyErasureCoded' } }
##
# @BlockdevCreateOptionsSheepdog:
#
# Driver specific image creation options for Sheepdog.
#
# @location Where to store the new image file
# @size Size of the virtual disk in bytes
# @backing-file File name of a base image
# @preallocation Preallocation mode (allowed values: off, full)
# @redundancy Redundancy of the image
# @object-size Object size of the image
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsSheepdog',
'data': { 'location': 'BlockdevOptionsSheepdog',
'size': 'size',
'*backing-file': 'str',
'*preallocation': 'PreallocMode',
'*redundancy': 'SheepdogRedundancy',
'*object-size': 'size' } }
##
# @BlockdevCreateOptionsSsh:
#
# Driver specific image creation options for SSH.
#
# @location Where to store the new image file
# @size Size of the virtual disk in bytes
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsSsh',
'data': { 'location': 'BlockdevOptionsSsh',
'size': 'size' } }
##
# @BlockdevCreateOptionsVdi:
#
# Driver specific image creation options for VDI.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @preallocation Preallocation mode for the new image (allowed values: off,
# metadata; default: off)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsVdi',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*preallocation': 'PreallocMode' } }
##
# @BlockdevVhdxSubformat:
#
# @dynamic: Growing image file
# @fixed: Preallocated fixed-size image file
#
# Since: 2.12
##
{ 'enum': 'BlockdevVhdxSubformat',
'data': [ 'dynamic', 'fixed' ] }
##
# @BlockdevCreateOptionsVhdx:
#
# Driver specific image creation options for vhdx.
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @log-size Log size in bytes, must be a multiple of 1 MB
# (default: 1 MB)
# @block-size Block size in bytes, must be a multiple of 1 MB and not
# larger than 256 MB (default: automatically choose a block
# size depending on the image size)
# @subformat vhdx subformat (default: dynamic)
# @block-state-zero Force use of payload blocks of type 'ZERO'. Non-standard,
# but default. Do not set to 'off' when using 'qemu-img
# convert' with subformat=dynamic.
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsVhdx',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*log-size': 'size',
'*block-size': 'size',
'*subformat': 'BlockdevVhdxSubformat',
'*block-state-zero': 'bool' } }
##
# @BlockdevVpcSubformat:
#
# @dynamic: Growing image file
# @fixed: Preallocated fixed-size image file
#
# Since: 2.12
##
{ 'enum': 'BlockdevVpcSubformat',
'data': [ 'dynamic', 'fixed' ] }
##
# @BlockdevCreateOptionsVpc:
#
# Driver specific image creation options for vpc (VHD).
#
# @file Node to create the image format on
# @size Size of the virtual disk in bytes
# @subformat vpc subformat (default: dynamic)
# @force-size Force use of the exact byte size instead of rounding to the
# next size that can be represented in CHS geometry
# (default: false)
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateOptionsVpc',
'data': { 'file': 'BlockdevRef',
'size': 'size',
'*subformat': 'BlockdevVpcSubformat',
'*force-size': 'bool' } }
##
# @BlockdevCreateNotSupported:
#
# This is used for all drivers that don't support creating images.
#
# Since: 2.12
##
{ 'struct': 'BlockdevCreateNotSupported', 'data': {}}
##
# @BlockdevCreateOptions:
#
# Options for creating an image format on a given node.
#
# @driver block driver to create the image format
#
# Since: 2.12
##
{ 'union': 'BlockdevCreateOptions',
'base': {
'driver': 'BlockdevDriver' },
'discriminator': 'driver',
'data': {
'blkdebug': 'BlockdevCreateNotSupported',
'blkverify': 'BlockdevCreateNotSupported',
'bochs': 'BlockdevCreateNotSupported',
'cloop': 'BlockdevCreateNotSupported',
'copy-on-read': 'BlockdevCreateNotSupported',
'dmg': 'BlockdevCreateNotSupported',
'file': 'BlockdevCreateOptionsFile',
'ftp': 'BlockdevCreateNotSupported',
'ftps': 'BlockdevCreateNotSupported',
'gluster': 'BlockdevCreateOptionsGluster',
'host_cdrom': 'BlockdevCreateNotSupported',
'host_device': 'BlockdevCreateNotSupported',
'http': 'BlockdevCreateNotSupported',
'https': 'BlockdevCreateNotSupported',
'iscsi': 'BlockdevCreateNotSupported',
'luks': 'BlockdevCreateOptionsLUKS',
'nbd': 'BlockdevCreateNotSupported',
'nfs': 'BlockdevCreateOptionsNfs',
'null-aio': 'BlockdevCreateNotSupported',
'null-co': 'BlockdevCreateNotSupported',
'nvme': 'BlockdevCreateNotSupported',
'parallels': 'BlockdevCreateOptionsParallels',
'qcow': 'BlockdevCreateOptionsQcow',
'qcow2': 'BlockdevCreateOptionsQcow2',
'qed': 'BlockdevCreateOptionsQed',
'quorum': 'BlockdevCreateNotSupported',
'raw': 'BlockdevCreateNotSupported',
'rbd': 'BlockdevCreateOptionsRbd',
'replication': 'BlockdevCreateNotSupported',
'sheepdog': 'BlockdevCreateOptionsSheepdog',
'ssh': 'BlockdevCreateOptionsSsh',
'throttle': 'BlockdevCreateNotSupported',
'vdi': 'BlockdevCreateOptionsVdi',
'vhdx': 'BlockdevCreateOptionsVhdx',
'vmdk': 'BlockdevCreateNotSupported',
'vpc': 'BlockdevCreateOptionsVpc',
'vvfat': 'BlockdevCreateNotSupported',
'vxhs': 'BlockdevCreateNotSupported'
} }
##
# @x-blockdev-create:
#
# Create an image format on a given node.
# TODO Replace with something asynchronous (block job?)
#
# Since: 2.12
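#
# Example:
#
# For illustration only; file names, node names and sizes are
# placeholders. A typical sequence first creates a protocol-level
# file, opens it as a node, and then formats that node:
#
# -> { "execute": "x-blockdev-create",
#      "arguments": { "driver": "file",
#                     "filename": "/tmp/test.qcow2",
#                     "size": 0 } }
# <- { "return": {} }
#
# -> { "execute": "blockdev-add",
#      "arguments": { "driver": "file",
#                     "node-name": "proto0",
#                     "filename": "/tmp/test.qcow2" } }
# <- { "return": {} }
#
# -> { "execute": "x-blockdev-create",
#      "arguments": { "driver": "qcow2",
#                     "file": "proto0",
#                     "size": 134217728 } }
# <- { "return": {} }
#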
##
{ 'command': 'x-blockdev-create',
'data': 'BlockdevCreateOptions',
'boxed': true }
##
# @blockdev-open-tray:
#
# Opens a block device's tray. If there is a block driver state tree inserted as
# a medium, it will become inaccessible to the guest (but it will remain
# associated to the block device, so closing the tray will make it accessible
# again).
#
# If the tray was already open before, this will be a no-op.
#
# Once the tray opens, a DEVICE_TRAY_MOVED event is emitted. There are cases in
# which no such event will be generated; these include:
# - if the guest has locked the tray, @force is false and the guest does not
# respond to the eject request
# - if the BlockBackend denoted by @device does not have a guest device attached
# to it
# - if the guest device does not have an actual tray
#
# @device: Block device name (deprecated, use @id instead)
#
# @id: The name or QOM path of the guest device (since: 2.8)
#
# @force: if false (the default), an eject request will be sent to
# the guest if it has locked the tray (and the tray will not be opened
# immediately); if true, the tray will be opened regardless of whether
# it is locked
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "blockdev-open-tray",
# "arguments": { "id": "ide0-1-0" } }
#
# <- { "timestamp": { "seconds": 1418751016,
# "microseconds": 716996 },
# "event": "DEVICE_TRAY_MOVED",
# "data": { "device": "ide1-cd0",
# "id": "ide0-1-0",
# "tray-open": true } }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-open-tray',
'data': { '*device': 'str',
'*id': 'str',
'*force': 'bool' } }
##
# @blockdev-close-tray:
#
# Closes a block device's tray. If there is a block driver state tree associated
# with the block device (which is currently ejected), that tree will be loaded
# as the medium.
#
# If the tray was already closed before, this will be a no-op.
#
# @device: Block device name (deprecated, use @id instead)
#
# @id: The name or QOM path of the guest device (since: 2.8)
#
# Since: 2.5
#
# Example:
#
# -> { "execute": "blockdev-close-tray",
# "arguments": { "id": "ide0-1-0" } }
#
# <- { "timestamp": { "seconds": 1418751345,
# "microseconds": 272147 },
# "event": "DEVICE_TRAY_MOVED",
# "data": { "device": "ide1-cd0",
# "id": "ide0-1-0",
# "tray-open": false } }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-close-tray',
'data': { '*device': 'str',
'*id': 'str' } }
##
# @blockdev-remove-medium:
#
# Removes a medium (a block driver state tree) from a block device. That block
# device's tray must currently be open (unless there is no attached guest
# device).
#
# If the tray is open and there is no medium inserted, this will be a no-op.
#
# @id: The name or QOM path of the guest device
#
# Since: 2.12
#
# Example:
#
# -> { "execute": "blockdev-remove-medium",
# "arguments": { "id": "ide0-1-0" } }
#
# <- { "error": { "class": "GenericError",
# "desc": "Tray of device 'ide0-1-0' is not open" } }
#
# -> { "execute": "blockdev-open-tray",
# "arguments": { "id": "ide0-1-0" } }
#
# <- { "timestamp": { "seconds": 1418751627,
# "microseconds": 549958 },
# "event": "DEVICE_TRAY_MOVED",
# "data": { "device": "ide1-cd0",
# "id": "ide0-1-0",
# "tray-open": true } }
#
# <- { "return": {} }
#
# -> { "execute": "blockdev-remove-medium",
# "arguments": { "id": "ide0-1-0" } }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-remove-medium',
'data': { 'id': 'str' } }
##
# @blockdev-insert-medium:
#
# Inserts a medium (a block driver state tree) into a block device. That block
# device's tray must currently be open (unless there is no attached guest
# device) and there must be no medium inserted already.
#
# @id: The name or QOM path of the guest device
#
# @node-name: name of a node in the block driver state graph
#
# Since: 2.12
#
# Example:
#
# -> { "execute": "blockdev-add",
# "arguments": {
# "node-name": "node0",
# "driver": "raw",
# "file": { "driver": "file",
# "filename": "fedora.iso" } } }
# <- { "return": {} }
#
# -> { "execute": "blockdev-insert-medium",
# "arguments": { "id": "ide0-1-0",
# "node-name": "node0" } }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-insert-medium',
'data': { 'id': 'str',
'node-name': 'str'} }
##
# @BlockdevChangeReadOnlyMode:
#
# Specifies the new read-only mode of a block device subject to the
# @blockdev-change-medium command.
#
# @retain: Retains the current read-only mode
#
# @read-only: Makes the device read-only
#
# @read-write: Makes the device writable
#
# Since: 2.3
#
##
{ 'enum': 'BlockdevChangeReadOnlyMode',
'data': ['retain', 'read-only', 'read-write'] }
##
# @blockdev-change-medium:
#
# Changes the medium inserted into a block device by ejecting the current medium
# and loading a new image file which is inserted as the new medium (this command
# combines blockdev-open-tray, blockdev-remove-medium, blockdev-insert-medium
# and blockdev-close-tray).
#
# @device: Block device name (deprecated, use @id instead)
#
# @id: The name or QOM path of the guest device
# (since: 2.8)
#
# @filename: filename of the new image to be loaded
#
# @format: format to open the new image with (defaults to
# the probed format)
#
# @read-only-mode: change the read-only mode of the device; defaults
# to 'retain'
#
# Since: 2.5
#
# Examples:
#
# 1. Change a removable medium
#
# -> { "execute": "blockdev-change-medium",
# "arguments": { "id": "ide0-1-0",
# "filename": "/srv/images/Fedora-12-x86_64-DVD.iso",
# "format": "raw" } }
# <- { "return": {} }
#
# 2. Load a read-only medium into a writable drive
#
# -> { "execute": "blockdev-change-medium",
# "arguments": { "id": "floppyA",
# "filename": "/srv/images/ro.img",
# "format": "raw",
# "read-only-mode": "retain" } }
#
# <- { "error":
# { "class": "GenericError",
# "desc": "Could not open '/srv/images/ro.img': Permission denied" } }
#
# -> { "execute": "blockdev-change-medium",
# "arguments": { "id": "floppyA",
# "filename": "/srv/images/ro.img",
# "format": "raw",
# "read-only-mode": "read-only" } }
#
# <- { "return": {} }
#
##
{ 'command': 'blockdev-change-medium',
'data': { '*device': 'str',
'*id': 'str',
'filename': 'str',
'*format': 'str',
'*read-only-mode': 'BlockdevChangeReadOnlyMode' } }
##
# @BlockErrorAction:
#
# An enumeration of the actions that may be taken when a disk I/O error occurs
#
# @ignore: error has been ignored
#
# @report: error has been reported to the device
#
# @stop: error caused VM to be stopped
#
# Since: 2.1
##
{ 'enum': 'BlockErrorAction',
'data': [ 'ignore', 'report', 'stop' ] }
##
# @BLOCK_IMAGE_CORRUPTED:
#
# Emitted when a disk image is being marked corrupt. The image can be
# identified by its device or node name. The 'device' field is always
# present for compatibility reasons, but it can be empty ("") if the
# image does not have a device name associated.
#
# @device: device name. This is always present for compatibility
# reasons, but it can be empty ("") if the image does not
# have a device name associated.
#
# @node-name: node name (Since: 2.4)
#
# @msg: informative message for human consumption, such as the kind of
# corruption being detected. It should not be parsed by machines as it is
# not guaranteed to be stable
#
# @offset: if the corruption resulted from an image access, this is
# the host's access offset into the image
#
# @size: if the corruption resulted from an image access, this is
# the access size
#
# @fatal: if set, the image is marked corrupt and therefore unusable after this
# event and must be repaired (Since 2.2; before, every
# BLOCK_IMAGE_CORRUPTED event was fatal)
#
# Example:
#
# <- { "event": "BLOCK_IMAGE_CORRUPTED",
# "data": { "device": "ide0-hd0", "node-name": "node0",
# "msg": "Prevented active L1 table overwrite", "offset": 196608,
# "size": 65536 },
# "timestamp": { "seconds": 1378126126, "microseconds": 966463 } }
#
# Since: 1.7
##
{ 'event': 'BLOCK_IMAGE_CORRUPTED',
'data': { 'device' : 'str',
'*node-name' : 'str',
'msg' : 'str',
'*offset' : 'int',
'*size' : 'int',
'fatal' : 'bool' } }
##
# @BLOCK_IO_ERROR:
#
# Emitted when a disk I/O error occurs
#
# @device: device name. This is always present for compatibility
# reasons, but it can be empty ("") if the image does not
# have a device name associated.
#
# @node-name: node name. Note that errors may be reported for the root node
# that is directly attached to a guest device rather than for the
# node where the error occurred. The node name is not present if
# the drive is empty. (Since: 2.8)
#
# @operation: I/O operation
#
# @action: action that has been taken
#
# @nospace: true if the I/O error was caused by a no-space
# condition. This key is only present if query-block's
# io-status is present, please see query-block documentation
# for more information (since: 2.2)
#
# @reason: human readable string describing the error cause.
# (This field is a debugging aid for humans; it should not
# be parsed by applications) (since: 2.2)
#
# Note: If action is "stop", a STOP event will eventually follow the
# BLOCK_IO_ERROR event
#
# Since: 0.13.0
#
# Example:
#
# <- { "event": "BLOCK_IO_ERROR",
# "data": { "device": "ide0-hd1",
# "node-name": "#block212",
# "operation": "write",
# "action": "stop" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_IO_ERROR',
'data': { 'device': 'str', '*node-name': 'str',
'operation': 'IoOperationType',
'action': 'BlockErrorAction', '*nospace': 'bool',
'reason': 'str' } }
##
# @BLOCK_JOB_COMPLETED:
#
# Emitted when a block job has completed
#
# @type: job type
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: maximum progress value
#
# @offset: current progress value. On success this is equal to len.
# On failure this is less than len
#
# @speed: rate limit, bytes per second
#
# @error: error message. Only present on failure. This field
# contains a human-readable error message. There are no semantics
# other than that streaming has failed and clients should not try to
# interpret the error string
#
# Since: 1.1
#
# Example:
#
# <- { "event": "BLOCK_JOB_COMPLETED",
# "data": { "type": "stream", "device": "virtio-disk0",
# "len": 10737418240, "offset": 10737418240,
# "speed": 0 },
# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
#
##
{ 'event': 'BLOCK_JOB_COMPLETED',
'data': { 'type' : 'BlockJobType',
'device': 'str',
'len' : 'int',
'offset': 'int',
'speed' : 'int',
'*error': 'str' } }
##
# @BLOCK_JOB_CANCELLED:
#
# Emitted when a block job has been cancelled
#
# @type: job type
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: maximum progress value
#
# @offset: current progress value. On success this is equal to len.
# On failure this is less than len
#
# @speed: rate limit, bytes per second
#
# Since: 1.1
#
# Example:
#
# <- { "event": "BLOCK_JOB_CANCELLED",
# "data": { "type": "stream", "device": "virtio-disk0",
# "len": 10737418240, "offset": 134217728,
# "speed": 0 },
# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
#
##
{ 'event': 'BLOCK_JOB_CANCELLED',
'data': { 'type' : 'BlockJobType',
'device': 'str',
'len' : 'int',
'offset': 'int',
'speed' : 'int' } }
##
# @BLOCK_JOB_ERROR:
#
# Emitted when a block job encounters an error
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @operation: I/O operation
#
# @action: action that has been taken
#
# Since: 1.3
#
# Example:
#
# <- { "event": "BLOCK_JOB_ERROR",
# "data": { "device": "ide0-hd1",
# "operation": "write",
# "action": "stop" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_JOB_ERROR',
'data': { 'device' : 'str',
'operation': 'IoOperationType',
'action' : 'BlockErrorAction' } }
##
# @BLOCK_JOB_READY:
#
# Emitted when a block job is ready to complete
#
# @type: job type
#
# @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7
#
# @len: maximum progress value
#
# @offset: current progress value. On success this is equal to len.
# On failure this is less than len
#
# @speed: rate limit, bytes per second
#
# Note: The "ready to complete" status is always reset by a @BLOCK_JOB_ERROR
# event
#
# Since: 1.3
#
# Example:
#
# <- { "event": "BLOCK_JOB_READY",
# "data": { "device": "drive0", "type": "mirror", "speed": 0,
# "len": 2097152, "offset": 2097152 }
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_JOB_READY',
'data': { 'type' : 'BlockJobType',
'device': 'str',
'len' : 'int',
'offset': 'int',
'speed' : 'int' } }
##
# @BLOCK_JOB_PENDING:
#
# Emitted when a block job is awaiting explicit authorization to finalize graph
# changes via @block-job-finalize. If this job is part of a transaction, it will
# not emit this event until the transaction has converged first.
#
# @type: job type
#
# @id: The job identifier.
#
# Since: 2.12
#
# Example:
#
# <- { "event": "BLOCK_JOB_WAITING",
# "data": { "device": "drive0", "type": "mirror" },
# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
#
##
{ 'event': 'BLOCK_JOB_PENDING',
'data': { 'type' : 'BlockJobType',
'id' : 'str' } }
##
# @PreallocMode:
#
# Preallocation mode of QEMU image file
#
# @off: no preallocation
# @metadata: preallocate only for metadata
# @falloc: like @full preallocation but allocates disk space by
# posix_fallocate() rather than writing zeros.
# @full: preallocate all data by writing zeros to the device to ensure
# disk space is really available. @full preallocation also sets up
# metadata correctly.
#
# Since: 2.2
##
{ 'enum': 'PreallocMode',
'data': [ 'off', 'metadata', 'falloc', 'full' ] }
##
# @BLOCK_WRITE_THRESHOLD:
#
# Emitted when writes on a block device reach or exceed the
# configured write threshold. For thin-provisioned devices, this
# means the device should be extended to avoid pausing for
# disk exhaustion.
# The event is one-shot. Once triggered, it needs to be
# re-registered with another block-set-write-threshold command.
#
# @node-name: graph node name on which the threshold was exceeded.
#
# @amount-exceeded: amount of data which exceeded the threshold, in bytes.
#
# @write-threshold: last configured threshold, in bytes.
#
# Since: 2.3
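#
# Example:
#
# Illustrative values only, matching the @block-set-write-threshold
# example below:
#
# <- { "event": "BLOCK_WRITE_THRESHOLD",
#      "data": { "node-name": "mydev",
#                "amount-exceeded": 65536,
#                "write-threshold": 17179869184 },
#      "timestamp": { "seconds": 1520405006, "microseconds": 152360 } }
#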
##
{ 'event': 'BLOCK_WRITE_THRESHOLD',
'data': { 'node-name': 'str',
'amount-exceeded': 'uint64',
'write-threshold': 'uint64' } }
##
# @block-set-write-threshold:
#
# Change the write threshold for a block drive. An event will be
# delivered if a write to this block drive crosses the configured
# threshold. The threshold is an offset and thus must be
# non-negative. Default is no write threshold. Setting the threshold
# to zero disables it.
#
# This is useful to transparently resize thin-provisioned drives without
# the guest OS noticing.
#
# @node-name: graph node name on which the threshold must be set.
#
# @write-threshold: configured threshold for the block device, bytes.
# Use 0 to disable the threshold.
#
# Since: 2.3
#
# Example:
#
# -> { "execute": "block-set-write-threshold",
# "arguments": { "node-name": "mydev",
# "write-threshold": 17179869184 } }
# <- { "return": {} }
#
##
{ 'command': 'block-set-write-threshold',
'data': { 'node-name': 'str', 'write-threshold': 'uint64' } }
##
# @x-blockdev-change:
#
# Dynamically reconfigure the block driver state graph. It can be used
# to add, remove, insert or replace a graph node. Currently only the
# Quorum driver implements this feature to add or remove its child. This
# is useful to fix a broken quorum child.
#
# If @node is specified, it will be inserted under @parent. @child
# may not be specified in this case. If both @parent and @child are
# specified but @node is not, @child will be detached from @parent.
#
# @parent: the id or name of the parent node.
#
# @child: the name of a child under the given parent node.
#
# @node: the name of the node that will be added.
#
# Note: this command is experimental, and its API is not stable. It
# does not support all kinds of operations, all kinds of children, nor
# all block drivers.
#
# FIXME Removing children from a quorum node means introducing gaps in the
# child indices. This cannot be represented in the 'children' list of
# BlockdevOptionsQuorum, as returned by .bdrv_refresh_filename().
#
# Warning: The data in a new quorum child MUST be consistent with that of
# the rest of the array.
#
# Since: 2.7
#
# Example:
#
# 1. Add a new node to a quorum
# -> { "execute": "blockdev-add",
# "arguments": {
# "driver": "raw",
# "node-name": "new_node",
# "file": { "driver": "file",
# "filename": "test.raw" } } }
# <- { "return": {} }
# -> { "execute": "x-blockdev-change",
# "arguments": { "parent": "disk1",
# "node": "new_node" } }
# <- { "return": {} }
#
# 2. Delete a quorum's node
# -> { "execute": "x-blockdev-change",
# "arguments": { "parent": "disk1",
# "child": "children.1" } }
# <- { "return": {} }
#
##
{ 'command': 'x-blockdev-change',
'data' : { 'parent': 'str',
'*child': 'str',
'*node': 'str' } }
##
# @x-blockdev-set-iothread:
#
# Move @node and its children into the @iothread. If @iothread is null then
# move @node and its children into the main loop.
#
# The node must not be attached to a BlockBackend.
#
# @node-name: the name of the block driver node
#
# @iothread: the name of the IOThread object or null for the main loop
#
# @force: true if the node and its children should be moved when a BlockBackend
# is already attached
#
# Note: this command is experimental and intended for test cases that need
# control over IOThreads only.
#
# Since: 2.12
#
# Example:
#
# 1. Move a node into an IOThread
# -> { "execute": "x-blockdev-set-iothread",
# "arguments": { "node-name": "disk1",
# "iothread": "iothread0" } }
# <- { "return": {} }
#
# 2. Move a node into the main loop
# -> { "execute": "x-blockdev-set-iothread",
# "arguments": { "node-name": "disk1",
# "iothread": null } }
# <- { "return": {} }
#
##
{ 'command': 'x-blockdev-set-iothread',
'data' : { 'node-name': 'str',
'iothread': 'StrOrNull',
'*force': 'bool' } }