migration/next for 20180115

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABCAAGBQJaXJWkAAoJEPSH7xhYctcjYycP/RfdDyPoU+33jh4b6P1vgS1F
 SAWstVA22bcfh0cnIXQXKZw+xTR5F8VZTqUAlzVRp4XfEN8bvq611aDiLznhn+5x
 z3FS22toBmgocssMUjnkILbkkWDG2EzzjNIHKc7tdYqsyMAupFXsFbwYdbO0hS8l
 0AlWmUmdLQ5E5Sv4Cl8oXzh+Ueik035Gng9QabKSJJ3H7AlyeuX0WRwE2NXX9jVP
 6cGWD+76ut7m9noT1GpBh076C+ULKmnKy3Z8mlhhfeX17051iFtqe/G539zy6vLr
 XyhB7Q5oWbYpeJxD8cHJCqAMPfIdjgZaLwCLCGyz4Bswfl9QhG0d2YHBZbkrAe1W
 AYNsR3wra3gCnr+glCxmY8p5OyLzlXo18956kX57I3vAZK5mynW0sk/o7C6V5Wlk
 1Qvk/s9w0Uoep9BAhU+bAGMPoHugwbflSDDL6k0ebds4U5q5RxGtRamSygc9tQwo
 e8ciCk3/0hxRv5dVAdlvR1ysM7CqUbFCCS95m9Do21kjs3fm1rY79Kkzuimr62+K
 AOBgBdGjNM1ii632ekbR5+buCgMRUuEjwQbcBUyABoZdWtOPXvZICMwxHMoxQS3z
 IXc3NEvJ3g9KBcg+MjU0lynvAnvV1lYJTvivkL7wXSA46yxNWa5eis80qyl3iGrO
 EcPDdwChH91nAXhwHJu0
 =nS/P
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20180115' into staging

migration/next for 20180115

# gpg: Signature made Mon 15 Jan 2018 11:51:00 GMT
# gpg:                using RSA key 0xF487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>"
# gpg:                 aka "Juan Quintela <quintela@trasno.org>"
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03  4B82 F487 EF18 5872 D723

* remotes/juanquintela/tags/migration/20180115: (27 commits)
  migration: remove notify in fd_error
  migration: remove some block_cleanup_parameters()
  migration: put the finish part into a new function
  migration: major cleanup for migrate iterations
  migration: cleanup stats update into function
  migration: use switch at the end of migration
  migration: introduce migrate_calculate_complete
  migration: introduce downtime_start
  migration: move vm_old_running into global state
  migration: split use of MigrationState.total_time
  migration: remove "enable_colo" var
  migration: qemu_savevm_state_cleanup() in cleanup
  migration: assert colo instead of check
  migration: finalize current_migration object
  migration: Guard ram_bytes_remaining against early call
  migration: add postcopy total blocktime into query-migrate
  migration: add blocktime calculation into migration-test
  migration: postcopy_blocktime documentation
  migration: calculate vCPU blocktime on dst side
  migration: add postcopy blocktime ctx into MigrationIncomingState
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2018-01-15 13:17:47 +00:00
commit f521eeee3b
13 changed files with 885 additions and 407 deletions

@ -1,4 +1,6 @@
= Migration =
=========
Migration
=========
QEMU has code to load/save the state of the guest that it is running.
These are two complementary operations. Saving the state just does
@ -26,7 +28,8 @@ the guest to be stopped. Typically the time that the guest is
unresponsive during live migration is in the low hundreds of milliseconds
(notice that this depends on a lot of things).
=== Types of migration ===
Types of migration
==================
Now that we have talked about live migration, there are several ways
to do migration:
@ -41,49 +44,21 @@ All these four migration protocols use the same infrastructure to
save/restore state devices. This infrastructure is shared with the
savevm/loadvm functionality.
=== State Live Migration ===
State Live Migration
====================
This is used for RAM and block devices. It is not yet ported to vmstate.
<Fill more information here>
=== What is the common infrastructure ===
Common infrastructure
=====================
QEMU uses a QEMUFile abstraction to be able to do migration. Any type
of migration that wants to use QEMU infrastructure has to create a
QEMUFile with:
The files, sockets or fd's that carry the migration stream are abstracted by
the ``QEMUFile`` type (see `migration/qemu-file.h`). In most cases this
is connected to a subtype of ``QIOChannel`` (see `io/`).
QEMUFile *qemu_fopen_ops(void *opaque,
QEMUFilePutBufferFunc *put_buffer,
QEMUFileGetBufferFunc *get_buffer,
QEMUFileCloseFunc *close);
The functions have the following functionality:
This function writes a chunk of data to a file at the given position.
The pos argument can be ignored if the file is only used for
streaming. The handler should try to write all of the data it can.
typedef int (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf,
int64_t pos, int size);
Read a chunk of data from a file at the given position. The pos argument
can be ignored if the file is only used for streaming. The number of
bytes actually read should be returned.
typedef int (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf,
int64_t pos, int size);
Close a file and return an error code.
typedef int (QEMUFileCloseFunc)(void *opaque);
You can use any internal state that you need using the opaque void *
pointer that is passed to all functions.
The important functions for us are put_buffer()/get_buffer(), which
allow us to write/read a buffer to/from the QEMUFile.
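For illustration, a minimal sketch (hypothetical helpers, not part of this
patch) of pushing bytes through a ``QEMUFile`` with the real
``qemu_put_*``/``qemu_get_*`` accessors:

.. code:: c

    /* Sketch only: a hypothetical blob saved with a length prefix. */
    static void demo_save_blob(QEMUFile *f, const uint8_t *blob, uint32_t len)
    {
        qemu_put_be32(f, len);          /* length, big-endian on the wire */
        qemu_put_buffer(f, blob, len);  /* raw payload bytes */
    }

    static uint32_t demo_load_blob(QEMUFile *f, uint8_t *blob, uint32_t max)
    {
        uint32_t len = qemu_get_be32(f);

        if (len > max) {
            return 0;                   /* corrupt or incompatible stream */
        }
        qemu_get_buffer(f, blob, len);
        return len;
    }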
=== How to save the state of one device ===
Saving the state of one device
==============================
The state of a device is saved using intermediate buffers. There are
some helper functions to assist this saving.
@ -93,34 +68,38 @@ version. When we migrate a device, we save/load the state as a series
of fields. Sometimes, due to bugs or new functionality, we need to
change the state to store more/different information. We use the
version to identify each time that we do a change. Each version is
associated with a series of fields saved. The save_state always saves
the state as the newer version. But load_state sometimes is able to
associated with a series of fields saved. The `save_state` always saves
the state as the newer version. But `load_state` sometimes is able to
load state from an older version.
=== Legacy way ===
Legacy way
----------
This way is going to disappear as soon as all current users are ported to VMSTATE.
Each device has to register two functions, one to save the state and
another to load the state back.
int register_savevm(DeviceState *dev,
const char *idstr,
int instance_id,
int version_id,
SaveStateHandler *save_state,
LoadStateHandler *load_state,
void *opaque);
.. code:: c
typedef void SaveStateHandler(QEMUFile *f, void *opaque);
typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
int register_savevm(DeviceState *dev,
const char *idstr,
int instance_id,
int version_id,
SaveStateHandler *save_state,
LoadStateHandler *load_state,
void *opaque);
The important functions for the device state format are the save_state
and load_state. Notice that load_state receives a version_id
parameter to know what state format is receiving. save_state doesn't
typedef void SaveStateHandler(QEMUFile *f, void *opaque);
typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
The important functions for the device state format are the `save_state`
and `load_state`. Notice that `load_state` receives a version_id
parameter to know what state format it is receiving. `save_state` doesn't
have a version_id parameter because it always uses the latest version.
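A minimal sketch of the legacy pattern (hypothetical ``DemoState`` device,
not from this patch):

.. code:: c

    typedef struct DemoState {
        uint32_t counter;
    } DemoState;

    static void demo_save_state(QEMUFile *f, void *opaque)
    {
        DemoState *s = opaque;

        qemu_put_be32(f, s->counter);   /* always the latest format */
    }

    static int demo_load_state(QEMUFile *f, void *opaque, int version_id)
    {
        DemoState *s = opaque;

        if (version_id != 1) {
            return -EINVAL;             /* unknown state format */
        }
        s->counter = qemu_get_be32(f);
        return 0;
    }

    /* registered with:
     * register_savevm(NULL, "demo", 0, 1,
     *                 demo_save_state, demo_load_state, s);
     */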
=== VMState ===
VMState
-------
The legacy way of saving/loading state of the device had the problem
that we have to maintain two functions in sync. If we did one change
@ -135,31 +114,36 @@ save/load functions.
An example (from hw/input/pckbd.c)
static const VMStateDescription vmstate_kbd = {
.name = "pckbd",
.version_id = 3,
.minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_UINT8(write_cmd, KBDState),
VMSTATE_UINT8(status, KBDState),
VMSTATE_UINT8(mode, KBDState),
VMSTATE_UINT8(pending, KBDState),
VMSTATE_END_OF_LIST()
}
};
.. code:: c
static const VMStateDescription vmstate_kbd = {
.name = "pckbd",
.version_id = 3,
.minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_UINT8(write_cmd, KBDState),
VMSTATE_UINT8(status, KBDState),
VMSTATE_UINT8(mode, KBDState),
VMSTATE_UINT8(pending, KBDState),
VMSTATE_END_OF_LIST()
}
};
We are declaring the state with name "pckbd".
The version_id is 3, and the fields are 4 uint8_t in a KBDState structure.
The `version_id` is 3, and the fields are 4 uint8_t in a KBDState structure.
We registered this with:
.. code:: c
vmstate_register(NULL, 0, &vmstate_kbd, s);
Note: talk about how vmstate <-> qdev interact, and what the instance ids mean.
You can search for VMSTATE_* macros for lots of types used in QEMU in
You can search for ``VMSTATE_*`` macros for lots of types used in QEMU in
include/hw/hw.h.
=== More about versions ===
More about versions
-------------------
Version numbers are intended for major incompatible changes to the
migration of a device, and using them breaks backwards-migration
@ -168,22 +152,23 @@ compatibility; in general most changes can be made by adding Subsections
You can see that there are several version fields:
- version_id: the maximum version_id supported by VMState for that device.
- minimum_version_id: the minimum version_id that VMState is able to understand
- `version_id`: the maximum version_id supported by VMState for that device.
- `minimum_version_id`: the minimum version_id that VMState is able to understand
for that device.
- minimum_version_id_old: For devices that were not able to port to vmstate, we can
- `minimum_version_id_old`: For devices that were not able to port to vmstate, we can
assign a function that knows how to read this old state. This field is
ignored if there is no load_state_old handler.
ignored if there is no `load_state_old` handler.
So, VMState is able to read versions from minimum_version_id to
version_id. And the function load_state_old() (if present) is able to
version_id. And the function ``load_state_old()`` (if present) is able to
load state from minimum_version_id_old to minimum_version_id. This
function is deprecated and will be removed when no more users are left.
Saving state will always create a section with the 'version_id' value
and thus can't be loaded by any older QEMU.
=== Massaging functions ===
Massaging functions
-------------------
Sometimes it is not enough to be able to save the state directly
from one structure; we need to fill in the correct values there. One
@ -194,24 +179,24 @@ load the state for the cpu that we have just loaded from the QEMUFile.
The functions to do that are inside a vmstate definition, and are called:
- int (*pre_load)(void *opaque);
- ``int (*pre_load)(void *opaque);``
This function is called before we load the state of one device.
- int (*post_load)(void *opaque, int version_id);
- ``int (*post_load)(void *opaque, int version_id);``
This function is called after we load the state of one device.
- int (*pre_save)(void *opaque);
- ``int (*pre_save)(void *opaque);``
This function is called before we save the state of one device.
Example: You can look at hpet.c, which uses the three functions to
massage the state that is transferred.
If you use memory API functions that update memory layout outside
initialization (i.e., in response to a guest action), this is a strong
indication that you need to call these functions in a post_load callback.
indication that you need to call these functions in a `post_load` callback;
a minimal sketch follows the list below.
Examples of such memory API functions are:
- memory_region_add_subregion()
@ -221,7 +206,8 @@ Examples of such memory API functions are:
- memory_region_set_address()
- memory_region_set_alias_offset()
=== Subsections ===
Subsections
-----------
The use of version_id makes it possible to migrate from older versions
to newer versions of a device, but not the other way around. This
@ -251,52 +237,54 @@ value that it uses.
Example:
static bool ide_drive_pio_state_needed(void *opaque)
{
IDEState *s = opaque;
.. code:: c
return ((s->status & DRQ_STAT) != 0)
|| (s->bus->error_status & BM_STATUS_PIO_RETRY);
}
static bool ide_drive_pio_state_needed(void *opaque)
{
IDEState *s = opaque;
const VMStateDescription vmstate_ide_drive_pio_state = {
.name = "ide_drive/pio_state",
.version_id = 1,
.minimum_version_id = 1,
.pre_save = ide_drive_pio_pre_save,
.post_load = ide_drive_pio_post_load,
.needed = ide_drive_pio_state_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(req_nb_sectors, IDEState),
VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
vmstate_info_uint8, uint8_t),
VMSTATE_INT32(cur_io_buffer_offset, IDEState),
VMSTATE_INT32(cur_io_buffer_len, IDEState),
VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
VMSTATE_INT32(elementary_transfer_size, IDEState),
VMSTATE_INT32(packet_transfer_size, IDEState),
VMSTATE_END_OF_LIST()
}
};
return ((s->status & DRQ_STAT) != 0)
|| (s->bus->error_status & BM_STATUS_PIO_RETRY);
}
const VMStateDescription vmstate_ide_drive = {
.name = "ide_drive",
.version_id = 3,
.minimum_version_id = 0,
.post_load = ide_drive_post_load,
.fields = (VMStateField[]) {
.... several fields ....
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription*[]) {
&vmstate_ide_drive_pio_state,
NULL
}
};
const VMStateDescription vmstate_ide_drive_pio_state = {
.name = "ide_drive/pio_state",
.version_id = 1,
.minimum_version_id = 1,
.pre_save = ide_drive_pio_pre_save,
.post_load = ide_drive_pio_post_load,
.needed = ide_drive_pio_state_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(req_nb_sectors, IDEState),
VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
vmstate_info_uint8, uint8_t),
VMSTATE_INT32(cur_io_buffer_offset, IDEState),
VMSTATE_INT32(cur_io_buffer_len, IDEState),
VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
VMSTATE_INT32(elementary_transfer_size, IDEState),
VMSTATE_INT32(packet_transfer_size, IDEState),
VMSTATE_END_OF_LIST()
}
};
const VMStateDescription vmstate_ide_drive = {
.name = "ide_drive",
.version_id = 3,
.minimum_version_id = 0,
.post_load = ide_drive_post_load,
.fields = (VMStateField[]) {
.... several fields ....
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription*[]) {
&vmstate_ide_drive_pio_state,
NULL
}
};
Here we have a subsection for the pio state. We only need to
save/send this state when we are in the middle of a pio operation
(that is what ide_drive_pio_state_needed() checks). If DRQ_STAT is
(that is what ``ide_drive_pio_state_needed()`` checks). If DRQ_STAT is
not enabled, the values in those fields are garbage and don't need to
be sent.
@ -304,11 +292,12 @@ Using a condition function that checks a 'property' to determine whether
to send a subsection allows backwards migration compatibility when
new subsections are added.
For example;
a) Add a new property using DEFINE_PROP_BOOL - e.g. support-foo and
For example:
a) Add a new property using ``DEFINE_PROP_BOOL`` - e.g. support-foo and
default it to true.
b) Add an entry to the HW_COMPAT_ for the previous version
that sets the property to false.
b) Add an entry to the ``HW_COMPAT_`` for the previous version that sets
the property to false.
c) Add a static bool support_foo function that tests the property.
d) Add a subsection with a .needed set to the support_foo function
e) (potentially) Add a pre_load that sets up a default value for 'foo'
   (a sketch of this pattern follows)
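A minimal sketch of steps a), c) and d) (hypothetical ``DemoState`` and
property name, not from this patch):

.. code:: c

    /* a) the property, defaulting to true on new machine types */
    DEFINE_PROP_BOOL("support-foo", DemoState, support_foo, true),

    /* c) test function, used as the subsection's .needed hook in d) */
    static bool demo_foo_needed(void *opaque)
    {
        DemoState *s = opaque;

        return s->support_foo;
    }

    /* d) the subsection itself */
    static const VMStateDescription vmstate_demo_foo = {
        .name = "demo/foo",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = demo_foo_needed,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(foo, DemoState),
            VMSTATE_END_OF_LIST()
        }
    };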
@ -332,25 +321,30 @@ in most cases. In general the preference is to tie the subsection to
the machine type, and allow reliable migrations, unless the behaviour
from omission of the subsection is really bad.
= Not sending existing elements =
Not sending existing elements
-----------------------------
Sometimes members of the VMState are no longer needed;
removing them will break migration compatibility
making them version dependent and bumping the version will break backwards
migration compatibility.
Sometimes members of the VMState are no longer needed:
- removing them will break migration compatibility
- making them version dependent and bumping the version will break backwards migration compatibility.
The best way is to:
a) Add a new property/compatibility/function in the same way for subsections
above.
a) Add a new property/compatibility/function in the same way as for subsections above.
b) replace the VMSTATE macro with the _TEST version of the macro, e.g.:
VMSTATE_UINT32(foo, barstruct)
``VMSTATE_UINT32(foo, barstruct)``
becomes
VMSTATE_UINT32_TEST(foo, barstruct, pre_version_baz)
Sometime in the future when we no longer care about the ancient
versions these can be killed off.
``VMSTATE_UINT32_TEST(foo, barstruct, pre_version_baz)``
= Return path =
Sometime in the future, when we no longer care about the ancient versions, these can be killed off; a sketch of the ``_TEST`` pattern follows.
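A sketch of that pattern (hypothetical names, not from this patch); the test
function has the usual ``bool (*)(void *opaque, int version_id)`` shape and
simply gates whether the field goes on the wire:

.. code:: c

    /* Only send 'foo' for machine types that predate its removal;
     * 'send_foo' would be set from a compat property as above. */
    static bool pre_version_baz(void *opaque, int version_id)
    {
        DemoState *s = opaque;

        return s->send_foo;
    }

    .fields = (VMStateField[]) {
        VMSTATE_UINT32_TEST(foo, DemoState, pre_version_baz),
        VMSTATE_END_OF_LIST()
    }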
Return path
-----------
In most migration scenarios there is only a single data path that runs
from the source VM to the destination, typically along a single fd (although
@ -360,19 +354,23 @@ However, some uses need two way communication; in particular the Postcopy
destination needs to be able to request pages on demand from the source.
For these scenarios there is a 'return path' from the destination to the source;
qemu_file_get_return_path(QEMUFile* fwdpath) gives the QEMUFile* for the return
``qemu_file_get_return_path(QEMUFile* fwdpath)`` gives the QEMUFile* for the return
path.
Source side
Forward path - written by migration thread
Return path - opened by main thread, read by return-path thread
Destination side
Forward path - read by main thread
Return path - opened by main thread, written by main thread AND postcopy
thread (protected by rp_mutex)
thread (protected by rp_mutex)
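A minimal sketch (hedged: the error handling is hypothetical, the helper is
the real ``qemu_file_get_return_path()``) of how the source obtains the
return path:

.. code:: c

    static int demo_open_return_path(MigrationState *ms)
    {
        /* Returns NULL if the underlying channel cannot talk back. */
        ms->rp_state.from_dst_file =
            qemu_file_get_return_path(ms->to_dst_file);

        return ms->rp_state.from_dst_file ? 0 : -1;
    }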
Postcopy
========
= Postcopy =
'Postcopy' migration is a way to deal with migrations that refuse to converge
(or take too long to converge). Its plus side is that there is an upper bound
on the amount of migration traffic and time it takes; the down side is that during
@ -386,27 +384,44 @@ a fault that's translated by QEMU into a request to the source QEMU.
Postcopy can be combined with precopy (i.e. normal migration) so that if precopy
doesn't finish in a given time the switch is made to postcopy.
=== Enabling postcopy ===
Enabling postcopy
-----------------
To enable postcopy, issue this command on the monitor prior to the
start of migration:
migrate_set_capability postcopy-ram on
``migrate_set_capability postcopy-ram on``
The normal commands are then used to start a migration, which is still
started in precopy mode. Issuing:
migrate_start_postcopy
``migrate_start_postcopy``
will now cause the transition from precopy to postcopy.
It can be issued immediately after migration is started or any
time later on. Issuing it after the end of a migration is harmless.
Note: During the postcopy phase, the bandwidth limits set using
migrate_set_speed is ignored (to avoid delaying requested pages that
the destination is waiting for).
Blocktime is a postcopy live migration metric, intended to show how
long the vCPU was in an interruptible sleep due to a page fault.
That metric is calculated both for all vCPUs as an overlapped value, and
separately for each vCPU. These values are calculated on the destination
side. To enable postcopy blocktime calculation, enter the following
command on the destination monitor:
=== Postcopy device transfer ===
``migrate_set_capability postcopy-blocktime on``
Postcopy blocktime can be retrieved by the ``query-migrate`` QMP command.
The ``postcopy-blocktime`` value shows the overlapped blocking
time for all vCPUs, and ``postcopy-vcpu-blocktime`` shows the list of
blocking times per vCPU.
.. note::
  During the postcopy phase, the bandwidth limits set using
  ``migrate_set_speed`` are ignored (to avoid delaying requested pages
  that the destination is waiting for).
Postcopy device transfer
------------------------
Loading of device data may cause the device emulation to access guest RAM
that may trigger faults that have to be resolved by the source, as such
@ -416,6 +431,7 @@ before the device load begins to free the stream up. This is achieved by
'packaging' the device data into a blob that's read in one go.
Source behaviour
----------------
Until postcopy is entered the migration stream is identical to normal
precopy, except for the addition of a 'postcopy advise' command at
@ -423,13 +439,14 @@ the beginning, to tell the destination that postcopy might happen.
When postcopy starts the source sends the page discard data and then
forms the 'package' containing:
Command: 'postcopy listen'
The device state
A series of sections, identical to the precopy streams device state stream
containing everything except postcopiable devices (i.e. RAM)
Command: 'postcopy run'
- Command: 'postcopy listen'
- The device state
The 'package' is sent as the data part of a Command: 'CMD_PACKAGED', and the
A series of sections, identical to the precopy streams device state stream
containing everything except postcopiable devices (i.e. RAM)
- Command: 'postcopy run'
The 'package' is sent as the data part of a Command: ``CMD_PACKAGED``, and the
contents are formatted in the same way as the main migration stream.
During postcopy the source scans the list of dirty pages and sends them
@ -441,82 +458,100 @@ to be sent quickly in the hope that those pages are likely to be used
by the destination soon.
Destination behaviour
---------------------
Initially the destination looks the same as precopy, with a single thread
reading the migration stream; the 'postcopy advise' and 'discard' commands
are processed to change the way RAM is managed, but don't affect the stream
processing.
------------------------------------------------------------------------------
1 2 3 4 5 6 7
main -----DISCARD-CMD_PACKAGED ( LISTEN DEVICE DEVICE DEVICE RUN )
thread | |
| (page request)
| \___
v \
listen thread: --- page -- page -- page -- page -- page --
::
a b c
------------------------------------------------------------------------------
------------------------------------------------------------------------------
1 2 3 4 5 6 7
main -----DISCARD-CMD_PACKAGED ( LISTEN DEVICE DEVICE DEVICE RUN )
thread | |
| (page request)
| \___
v \
listen thread: --- page -- page -- page -- page -- page --
On receipt of CMD_PACKAGED (1)
All the data associated with the package - the ( ... ) section in the
diagram - is read into memory, and the main thread recurses into
qemu_loadvm_state_main to process the contents of the package (2)
which contains commands (3,6) and devices (4...)
a b c
------------------------------------------------------------------------------
On receipt of 'postcopy listen' - 3 -(i.e. the 1st command in the package)
a new thread (a) is started that takes over servicing the migration stream,
while the main thread carries on loading the package. It loads normal
background page data (b) but if during a device load a fault happens (5) the
returned page (c) is loaded by the listen thread allowing the main threads
device load to carry on.
- On receipt of ``CMD_PACKAGED`` (1)
The last thing in the CMD_PACKAGED is a 'RUN' command (6) letting the destination
CPUs start running.
At the end of the CMD_PACKAGED (7) the main thread returns to normal running behaviour
and is no longer used by migration, while the listen thread carries
on servicing page data until the end of migration.
All the data associated with the package - the ( ... ) section in the diagram -
is read into memory, and the main thread recurses into qemu_loadvm_state_main
to process the contents of the package (2) which contains commands (3,6) and
devices (4...)
=== Postcopy states ===
- On receipt of 'postcopy listen' - 3 -(i.e. the 1st command in the package)
a new thread (a) is started that takes over servicing the migration stream,
while the main thread carries on loading the package. It loads normal
background page data (b) but if during a device load a fault happens (5)
the returned page (c) is loaded by the listen thread, allowing the main
thread's device load to carry on.
- The last thing in the ``CMD_PACKAGED`` is a 'RUN' command (6)
letting the destination CPUs start running. At the end of the
``CMD_PACKAGED`` (7) the main thread returns to normal running behaviour and
is no longer used by migration, while the listen thread carries on servicing
page data until the end of migration.
Postcopy states
---------------
Postcopy moves through a series of states (see postcopy_state) from
ADVISE->DISCARD->LISTEN->RUNNING->END
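On the destination these map onto the ``PostcopyState`` enum (a sketch of
the shape it has around this era in ``migration/postcopy-ram.h``, not
verbatim):

.. code:: c

    typedef enum {
        POSTCOPY_INCOMING_NONE = 0,  /* postcopy not advised */
        POSTCOPY_INCOMING_ADVISE,
        POSTCOPY_INCOMING_DISCARD,
        POSTCOPY_INCOMING_LISTENING,
        POSTCOPY_INCOMING_RUNNING,
        POSTCOPY_INCOMING_END
    } PostcopyState;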
Advise: Set at the start of migration if postcopy is enabled, even
if it hasn't had the start command; here the destination
checks that its OS has the support needed for postcopy, and performs
setup to ensure the RAM mappings are suitable for later postcopy.
The destination will fail early in migration at this point if the
required OS support is not present.
(Triggered by reception of POSTCOPY_ADVISE command)
- Advise
Discard: Entered on receipt of the first 'discard' command; prior to
the first Discard being performed, hugepages are switched off
(using madvise) to ensure that no new huge pages are created
during the postcopy phase, and to cause any huge pages that
have discards on them to be broken.
Set at the start of migration if postcopy is enabled, even
if it hasn't had the start command; here the destination
checks that its OS has the support needed for postcopy, and performs
setup to ensure the RAM mappings are suitable for later postcopy.
The destination will fail early in migration at this point if the
required OS support is not present.
(Triggered by reception of POSTCOPY_ADVISE command)
Listen: The first command in the package, POSTCOPY_LISTEN, switches
the destination state to Listen, and starts a new thread
(the 'listen thread') which takes over the job of receiving
pages off the migration stream, while the main thread carries
on processing the blob. With this thread able to process page
reception, the destination now 'sensitises' the RAM to detect
any access to missing pages (on Linux using the 'userfault'
system).
- Discard
Running: POSTCOPY_RUN causes the destination to synchronise all
state and start the CPUs and IO devices running. The main
thread now finishes processing the migration package and
now carries on as it would for normal precopy migration
(although it can't do the cleanup it would do as it
finishes a normal migration).
Entered on receipt of the first 'discard' command; prior to
the first Discard being performed, hugepages are switched off
(using madvise) to ensure that no new huge pages are created
during the postcopy phase, and to cause any huge pages that
have discards on them to be broken.
End: The listen thread can now quit, and perform the cleanup of migration
state, the migration is now complete.
- Listen
=== Source side page maps ===
The first command in the package, POSTCOPY_LISTEN, switches
the destination state to Listen, and starts a new thread
(the 'listen thread') which takes over the job of receiving
pages off the migration stream, while the main thread carries
on processing the blob. With this thread able to process page
reception, the destination now 'sensitises' the RAM to detect
any access to missing pages (on Linux using the 'userfault'
system).
- Running
POSTCOPY_RUN causes the destination to synchronise all
state and start the CPUs and IO devices running. The main
thread now finishes processing the migration package and
now carries on as it would for normal precopy migration
(although it can't do the cleanup it would do as it
finishes a normal migration).
- End
The listen thread can now quit and perform the cleanup of migration
state; the migration is now complete.
Source side page maps
---------------------
The source side keeps two bitmaps during postcopy; 'the migration bitmap'
and 'unsent map'. The 'migration bitmap' is basically the same as in
@ -529,6 +564,7 @@ The 'unsent map' is used for the transition to postcopy. It is a bitmap that
has a bit cleared whenever a page is sent to the destination, however during
the transition to postcopy mode it is combined with the migration bitmap
to form a set of pages that:
a) Have been sent but then redirtied (which must be discarded)
b) Have not yet been sent - which also must be discarded to cause any
transparent huge pages built during precopy to be broken.
@ -540,15 +576,17 @@ request for a page that has already been sent is ignored. Duplicate requests
such as this can happen as a page is sent at about the same time the
destination accesses it.
=== Postcopy with hugepages ===
Postcopy with hugepages
-----------------------
Postcopy now works with hugetlbfs backed memory:
a) The linux kernel on the destination must support userfault on hugepages.
b) The huge-page configuration on the source and destination VMs must be
identical; i.e. RAMBlocks on both sides must use the same page size.
c) Note that -mem-path /dev/hugepages will fall back to allocating normal
c) Note that ``-mem-path /dev/hugepages`` will fall back to allocating normal
RAM if it doesn't have enough hugepages, triggering (b) to fail.
Using -mem-prealloc enforces the allocation using hugepages.
Using ``-mem-prealloc`` enforces the allocation using hugepages.
d) Care should be taken with the size of hugepage used; postcopy with 2MB
hugepages works well, however 1GB hugepages are likely to be problematic
since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link,

hmp.c

@ -264,6 +264,21 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->cpu_throttle_percentage);
}
if (info->has_postcopy_blocktime) {
monitor_printf(mon, "postcopy blocktime: %" PRId64 "\n",
info->postcopy_blocktime);
}
if (info->has_postcopy_vcpu_blocktime) {
Visitor *v;
char *str;
v = string_output_visitor_new(false, &str);
visit_type_int64List(v, NULL, &info->postcopy_vcpu_blocktime, NULL);
visit_complete(v, &str);
monitor_printf(mon, "postcopy vcpu blocktime: %s\n", str);
g_free(str);
visit_free(v);
}
qapi_free_MigrationInfo(info);
qapi_free_MigrationCapabilityStatusList(caps);
}
@ -293,23 +308,23 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
if (params) {
assert(params->has_compress_level);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_LEVEL),
params->compress_level);
assert(params->has_compress_threads);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_THREADS),
params->compress_threads);
assert(params->has_decompress_threads);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_DECOMPRESS_THREADS),
params->decompress_threads);
assert(params->has_cpu_throttle_initial);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL),
params->cpu_throttle_initial);
assert(params->has_cpu_throttle_increment);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT),
params->cpu_throttle_increment);
assert(params->has_tls_creds);
@ -321,28 +336,28 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
MigrationParameter_str(MIGRATION_PARAMETER_TLS_HOSTNAME),
params->tls_hostname);
assert(params->has_max_bandwidth);
monitor_printf(mon, "%s: %" PRId64 " bytes/second\n",
monitor_printf(mon, "%s: %" PRIu64 " bytes/second\n",
MigrationParameter_str(MIGRATION_PARAMETER_MAX_BANDWIDTH),
params->max_bandwidth);
assert(params->has_downtime_limit);
monitor_printf(mon, "%s: %" PRId64 " milliseconds\n",
monitor_printf(mon, "%s: %" PRIu64 " milliseconds\n",
MigrationParameter_str(MIGRATION_PARAMETER_DOWNTIME_LIMIT),
params->downtime_limit);
assert(params->has_x_checkpoint_delay);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_X_CHECKPOINT_DELAY),
params->x_checkpoint_delay);
assert(params->has_block_incremental);
monitor_printf(mon, "%s: %s\n",
MigrationParameter_str(MIGRATION_PARAMETER_BLOCK_INCREMENTAL),
params->block_incremental ? "on" : "off");
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_X_MULTIFD_CHANNELS),
params->x_multifd_channels);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_X_MULTIFD_PAGE_COUNT),
params->x_multifd_page_count);
monitor_printf(mon, "%s: %" PRId64 "\n",
monitor_printf(mon, "%s: %" PRIu64 "\n",
MigrationParameter_str(MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE),
params->xbzrle_cache_size);
}


@ -44,6 +44,7 @@ void dump_vmstate_json_to_file(FILE *out_fp);
/* migration/migration.c */
void migration_object_init(void);
void migration_object_finalize(void);
void qemu_start_incoming_migration(const char *uri, Error **errp);
bool migration_is_idle(void);
void add_migration_state_change_notifier(Notifier *notify);


@ -132,6 +132,11 @@ void migration_object_init(void)
}
}
void migration_object_finalize(void)
{
object_unref(OBJECT(current_migration));
}
/* For outgoing */
MigrationState *migrate_get_current(void)
{
@ -591,14 +596,15 @@ static void populate_disk_info(MigrationInfo *info)
}
}
MigrationInfo *qmp_query_migrate(Error **errp)
static void fill_source_migration_info(MigrationInfo *info)
{
MigrationInfo *info = g_malloc0(sizeof(*info));
MigrationState *s = migrate_get_current();
switch (s->state) {
case MIGRATION_STATUS_NONE:
/* no migration has happened ever */
/* do not overwrite destination migration status */
return;
break;
case MIGRATION_STATUS_SETUP:
info->has_status = true;
@ -613,7 +619,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->has_status = true;
info->has_total_time = true;
info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
- s->total_time;
- s->start_time;
info->has_expected_downtime = true;
info->expected_downtime = s->expected_downtime;
info->has_setup_time = true;
@ -649,8 +655,6 @@ MigrationInfo *qmp_query_migrate(Error **errp)
break;
}
info->status = s->state;
return info;
}
/**
@ -714,6 +718,41 @@ static bool migrate_caps_check(bool *cap_list,
return true;
}
static void fill_destination_migration_info(MigrationInfo *info)
{
MigrationIncomingState *mis = migration_incoming_get_current();
switch (mis->state) {
case MIGRATION_STATUS_NONE:
return;
break;
case MIGRATION_STATUS_SETUP:
case MIGRATION_STATUS_CANCELLING:
case MIGRATION_STATUS_CANCELLED:
case MIGRATION_STATUS_ACTIVE:
case MIGRATION_STATUS_POSTCOPY_ACTIVE:
case MIGRATION_STATUS_FAILED:
case MIGRATION_STATUS_COLO:
info->has_status = true;
break;
case MIGRATION_STATUS_COMPLETED:
info->has_status = true;
fill_destination_postcopy_migration_info(info);
break;
}
info->status = mis->state;
}
MigrationInfo *qmp_query_migrate(Error **errp)
{
MigrationInfo *info = g_malloc0(sizeof(*info));
fill_destination_migration_info(info);
fill_source_migration_info(info);
return info;
}
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
Error **errp)
{
@ -741,22 +780,20 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
static bool migrate_params_check(MigrationParameters *params, Error **errp)
{
if (params->has_compress_level &&
(params->compress_level < 0 || params->compress_level > 9)) {
(params->compress_level > 9)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
"is invalid, it should be in the range of 0 to 9");
return false;
}
if (params->has_compress_threads &&
(params->compress_threads < 1 || params->compress_threads > 255)) {
if (params->has_compress_threads && (params->compress_threads < 1)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"compress_threads",
"is invalid, it should be in the range of 1 to 255");
return false;
}
if (params->has_decompress_threads &&
(params->decompress_threads < 1 || params->decompress_threads > 255)) {
if (params->has_decompress_threads && (params->decompress_threads < 1)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"decompress_threads",
"is invalid, it should be in the range of 1 to 255");
@ -781,38 +818,31 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp)
return false;
}
if (params->has_max_bandwidth &&
(params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
" range of 0 to %zu bytes/second", SIZE_MAX);
return false;
}
if (params->has_downtime_limit &&
(params->downtime_limit < 0 ||
params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
(params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
"the range of 0 to %d milliseconds",
MAX_MIGRATE_DOWNTIME);
return false;
}
if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"x_checkpoint_delay",
"is invalid, it should be positive");
return false;
}
if (params->has_x_multifd_channels &&
(params->x_multifd_channels < 1 || params->x_multifd_channels > 255)) {
/* x_checkpoint_delay is now always positive */
if (params->has_x_multifd_channels && (params->x_multifd_channels < 1)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"multifd_channels",
"is invalid, it should be in the range of 1 to 255");
return false;
}
if (params->has_x_multifd_page_count &&
(params->x_multifd_page_count < 1 ||
params->x_multifd_page_count > 10000)) {
(params->x_multifd_page_count < 1 ||
params->x_multifd_page_count > 10000)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"multifd_page_count",
"is invalid, it should be in the range of 1 to 10000");
@ -1077,6 +1107,8 @@ static void migrate_fd_cleanup(void *opaque)
qemu_bh_delete(s->cleanup_bh);
s->cleanup_bh = NULL;
qemu_savevm_state_cleanup();
if (s->to_dst_file) {
Error *local_err = NULL;
@ -1127,8 +1159,6 @@ void migrate_fd_error(MigrationState *s, const Error *error)
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
MIGRATION_STATUS_FAILED);
migrate_set_error(s, error);
notifier_list_notify(&migration_state_notifiers, s);
block_cleanup_parameters(s);
}
static void migrate_fd_cancel(MigrationState *s)
@ -1174,7 +1204,6 @@ static void migrate_fd_cancel(MigrationState *s)
s->block_inactive = false;
}
}
block_cleanup_parameters(s);
}
void add_migration_state_change_notifier(Notifier *notify)
@ -1268,7 +1297,11 @@ MigrationState *migrate_init(void)
migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->total_time = 0;
s->vm_was_running = false;
s->iteration_initial_bytes = 0;
s->threshold_size = 0;
return s;
}
@ -1508,6 +1541,15 @@ bool migrate_zero_blocks(void)
return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}
bool migrate_postcopy_blocktime(void)
{
MigrationState *s;
s = migrate_get_current();
return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
}
bool migrate_use_compression(void)
{
MigrationState *s;
@ -1843,7 +1885,7 @@ static int await_return_path_close_on_source(MigrationState *ms)
* Switch from normal iteration to postcopy
* Returns non-0 on error
*/
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
static int postcopy_start(MigrationState *ms)
{
int ret;
QIOChannelBuffer *bioc;
@ -1861,7 +1903,6 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
trace_postcopy_start_set_run();
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
*old_vm_running = runstate_is_running();
global_state_store();
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
if (ret < 0) {
@ -2051,21 +2092,17 @@ static int migration_maybe_pause(MigrationState *s,
* The caller 'breaks' the loop when this returns.
*
* @s: Current migration state
* @current_active_state: The migration state we expect to be in
* @*old_vm_running: Pointer to old_vm_running flag
* @*start_time: Pointer to time to update
*/
static void migration_completion(MigrationState *s, int current_active_state,
bool *old_vm_running,
int64_t *start_time)
static void migration_completion(MigrationState *s)
{
int ret;
int current_active_state = s->state;
if (s->state == MIGRATION_STATUS_ACTIVE) {
qemu_mutex_lock_iothread();
*start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
*old_vm_running = runstate_is_running();
s->vm_was_running = runstate_is_running();
ret = global_state_store();
if (!ret) {
@ -2152,6 +2189,155 @@ bool migrate_colo_enabled(void)
return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}
static void migration_calculate_complete(MigrationState *s)
{
uint64_t bytes = qemu_ftell(s->to_dst_file);
int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->total_time = end_time - s->start_time;
if (!s->downtime) {
/*
* It's still not set, so we are precopy migration. For
* postcopy, downtime is calculated during postcopy_start().
*/
s->downtime = end_time - s->downtime_start;
}
if (s->total_time) {
s->mbps = ((double) bytes * 8.0) / s->total_time / 1000;
}
}
static void migration_update_counters(MigrationState *s,
int64_t current_time)
{
uint64_t transferred, time_spent;
int64_t threshold_size;
double bandwidth;
if (current_time < s->iteration_start_time + BUFFER_DELAY) {
return;
}
transferred = qemu_ftell(s->to_dst_file) - s->iteration_initial_bytes;
time_spent = current_time - s->iteration_start_time;
bandwidth = (double)transferred / time_spent;
threshold_size = bandwidth * s->parameters.downtime_limit;
s->mbps = (((double) transferred * 8.0) /
((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
/*
* if we haven't sent anything, we don't want to
* recalculate. 10000 is a small enough number for our purposes
*/
if (ram_counters.dirty_pages_rate && transferred > 10000) {
s->expected_downtime = ram_counters.dirty_pages_rate *
qemu_target_page_size() / bandwidth;
}
qemu_file_reset_rate_limit(s->to_dst_file);
s->iteration_start_time = current_time;
s->iteration_initial_bytes = qemu_ftell(s->to_dst_file);
trace_migrate_transferred(transferred, time_spent,
bandwidth, threshold_size);
}
/* Migration thread iteration status */
typedef enum {
MIG_ITERATE_RESUME, /* Resume current iteration */
MIG_ITERATE_SKIP, /* Skip current iteration */
MIG_ITERATE_BREAK, /* Break the loop */
} MigIterateState;
/*
 * Return the iteration decision: resume the migration loop, skip the
 * current iteration, or break out of the loop.
 */
static MigIterateState migration_iteration_run(MigrationState *s)
{
uint64_t pending_size, pend_post, pend_nonpost;
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
qemu_savevm_state_pending(s->to_dst_file, s->threshold_size,
&pend_nonpost, &pend_post);
pending_size = pend_nonpost + pend_post;
trace_migrate_pending(pending_size, s->threshold_size,
pend_post, pend_nonpost);
if (pending_size && pending_size >= s->threshold_size) {
/* Still a significant amount to transfer */
if (migrate_postcopy() && !in_postcopy &&
pend_nonpost <= s->threshold_size &&
atomic_read(&s->start_postcopy)) {
if (postcopy_start(s)) {
error_report("%s: postcopy failed to start", __func__);
}
return MIG_ITERATE_SKIP;
}
/* Just another iteration step */
qemu_savevm_state_iterate(s->to_dst_file,
s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
} else {
trace_migration_thread_low_pending(pending_size);
migration_completion(s);
return MIG_ITERATE_BREAK;
}
return MIG_ITERATE_RESUME;
}
static void migration_iteration_finish(MigrationState *s)
{
/* If we enabled cpu throttling for auto-converge, turn it off. */
cpu_throttle_stop();
qemu_mutex_lock_iothread();
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
migration_calculate_complete(s);
runstate_set(RUN_STATE_POSTMIGRATE);
break;
case MIGRATION_STATUS_ACTIVE:
/*
* We should really assert here, but since it's during
* migration, let's try to reduce the usage of assertions.
*/
if (!migrate_colo_enabled()) {
error_report("%s: critical error: calling COLO code without "
"COLO enabled", __func__);
}
migrate_start_colo_process(s);
/*
* Fixme: we will run VM in COLO no matter its old running state.
* After exiting COLO, we will keep running.
*/
s->vm_was_running = true;
/* Fallthrough */
case MIGRATION_STATUS_FAILED:
case MIGRATION_STATUS_CANCELLED:
if (s->vm_was_running) {
vm_start();
} else {
if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
runstate_set(RUN_STATE_POSTMIGRATE);
}
}
break;
default:
/* Should not reach here, but if so, forgive the VM. */
error_report("%s: Unknown ending state %d", __func__, s->state);
break;
}
qemu_bh_schedule(s->cleanup_bh);
qemu_mutex_unlock_iothread();
}
/*
* Master migration thread on the source VM.
* It drives the migration and pumps the data down the outgoing channel.
@ -2159,26 +2345,12 @@ bool migrate_colo_enabled(void)
static void *migration_thread(void *opaque)
{
MigrationState *s = opaque;
/* Used by the bandwidth calcs, updated later */
int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
int64_t initial_bytes = 0;
/*
* The final stage happens when the remaining data is smaller than
* this threshold; it's calculated from the requested downtime and
* measured bandwidth
*/
int64_t threshold_size = 0;
int64_t start_time = initial_time;
int64_t end_time;
bool old_vm_running = false;
bool entered_postcopy = false;
/* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
bool enable_colo = migrate_colo_enabled();
rcu_register_thread();
s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_savevm_state_header(s->to_dst_file);
/*
@ -2213,122 +2385,38 @@ static void *migration_thread(void *opaque)
while (s->state == MIGRATION_STATUS_ACTIVE ||
s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
int64_t current_time;
uint64_t pending_size;
if (!qemu_file_rate_limit(s->to_dst_file)) {
uint64_t pend_post, pend_nonpost;
qemu_savevm_state_pending(s->to_dst_file, threshold_size,
&pend_nonpost, &pend_post);
pending_size = pend_nonpost + pend_post;
trace_migrate_pending(pending_size, threshold_size,
pend_post, pend_nonpost);
if (pending_size && pending_size >= threshold_size) {
/* Still a significant amount to transfer */
if (migrate_postcopy() &&
s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
pend_nonpost <= threshold_size &&
atomic_read(&s->start_postcopy)) {
if (!postcopy_start(s, &old_vm_running)) {
current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
entered_postcopy = true;
}
continue;
}
/* Just another iteration step */
qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
} else {
trace_migration_thread_low_pending(pending_size);
migration_completion(s, current_active_state,
&old_vm_running, &start_time);
MigIterateState iter_state = migration_iteration_run(s);
if (iter_state == MIG_ITERATE_SKIP) {
continue;
} else if (iter_state == MIG_ITERATE_BREAK) {
break;
}
}
if (qemu_file_get_error(s->to_dst_file)) {
migrate_set_state(&s->state, current_active_state,
MIGRATION_STATUS_FAILED);
if (migration_is_setup_or_active(s->state)) {
migrate_set_state(&s->state, s->state,
MIGRATION_STATUS_FAILED);
}
trace_migration_thread_file_err();
break;
}
current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
if (current_time >= initial_time + BUFFER_DELAY) {
uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
initial_bytes;
uint64_t time_spent = current_time - initial_time;
double bandwidth = (double)transferred_bytes / time_spent;
threshold_size = bandwidth * s->parameters.downtime_limit;
s->mbps = (((double) transferred_bytes * 8.0) /
((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
migration_update_counters(s, current_time);
trace_migrate_transferred(transferred_bytes, time_spent,
bandwidth, threshold_size);
/* if we haven't sent anything, we don't want to recalculate
10000 is a small enough number for our purposes */
if (ram_counters.dirty_pages_rate && transferred_bytes > 10000) {
s->expected_downtime = ram_counters.dirty_pages_rate *
qemu_target_page_size() / bandwidth;
}
qemu_file_reset_rate_limit(s->to_dst_file);
initial_time = current_time;
initial_bytes = qemu_ftell(s->to_dst_file);
}
if (qemu_file_rate_limit(s->to_dst_file)) {
/* usleep expects microseconds */
g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
g_usleep((s->iteration_start_time + BUFFER_DELAY -
current_time) * 1000);
}
}
trace_migration_thread_after_loop();
/* If we enabled cpu throttling for auto-converge, turn it off. */
cpu_throttle_stop();
end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_mutex_lock_iothread();
/*
* The resource has been allocated by migration will be reused in COLO
* process, so don't release them.
*/
if (!enable_colo) {
qemu_savevm_state_cleanup();
}
if (s->state == MIGRATION_STATUS_COMPLETED) {
uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
s->total_time = end_time - s->total_time;
if (!entered_postcopy) {
s->downtime = end_time - start_time;
}
if (s->total_time) {
s->mbps = (((double) transferred_bytes * 8.0) /
((double) s->total_time)) / 1000;
}
runstate_set(RUN_STATE_POSTMIGRATE);
} else {
if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
migrate_start_colo_process(s);
qemu_savevm_state_cleanup();
/*
* Fixme: we will run VM in COLO no matter its old running state.
* After exited COLO, we will keep running.
*/
old_vm_running = true;
}
if (old_vm_running && !entered_postcopy) {
vm_start();
} else {
if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
runstate_set(RUN_STATE_POSTMIGRATE);
}
}
}
qemu_bh_schedule(s->cleanup_bh);
qemu_mutex_unlock_iothread();
migration_iteration_finish(s);
rcu_unregister_thread();
return NULL;
}
@ -2375,10 +2463,15 @@ void migration_global_dump(Monitor *mon)
{
MigrationState *ms = migrate_get_current();
monitor_printf(mon, "globals: store-global-state=%d, only_migratable=%d, "
"send-configuration=%d, send-section-footer=%d\n",
ms->store_global_state, ms->only_migratable,
ms->send_configuration, ms->send_section_footer);
monitor_printf(mon, "globals:\n");
monitor_printf(mon, "store-global-state: %s\n",
ms->store_global_state ? "on" : "off");
monitor_printf(mon, "only-migratable: %s\n",
ms->only_migratable ? "on" : "off");
monitor_printf(mon, "send-configuration: %s\n",
ms->send_configuration ? "on" : "off");
monitor_printf(mon, "send-section-footer: %s\n",
ms->send_section_footer ? "on" : "off");
}
#define DEFINE_PROP_MIG_CAP(name, x) \
@ -2394,33 +2487,33 @@ static Property migration_properties[] = {
send_section_footer, true),
/* Migration parameters */
DEFINE_PROP_INT64("x-compress-level", MigrationState,
DEFINE_PROP_UINT8("x-compress-level", MigrationState,
parameters.compress_level,
DEFAULT_MIGRATE_COMPRESS_LEVEL),
DEFINE_PROP_INT64("x-compress-threads", MigrationState,
DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
parameters.compress_threads,
DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
DEFINE_PROP_INT64("x-decompress-threads", MigrationState,
DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
parameters.decompress_threads,
DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
DEFINE_PROP_INT64("x-cpu-throttle-initial", MigrationState,
DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
parameters.cpu_throttle_initial,
DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
DEFINE_PROP_INT64("x-cpu-throttle-increment", MigrationState,
DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
parameters.cpu_throttle_increment,
DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
DEFINE_PROP_INT64("x-max-bandwidth", MigrationState,
DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
parameters.max_bandwidth, MAX_THROTTLE),
DEFINE_PROP_INT64("x-downtime-limit", MigrationState,
DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
parameters.downtime_limit,
DEFAULT_MIGRATE_SET_DOWNTIME),
DEFINE_PROP_INT64("x-checkpoint-delay", MigrationState,
DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
parameters.x_checkpoint_delay,
DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
DEFINE_PROP_INT64("x-multifd-channels", MigrationState,
DEFINE_PROP_UINT8("x-multifd-channels", MigrationState,
parameters.x_multifd_channels,
DEFAULT_MIGRATE_MULTIFD_CHANNELS),
DEFINE_PROP_INT64("x-multifd-page-count", MigrationState,
DEFINE_PROP_UINT32("x-multifd-page-count", MigrationState,
parameters.x_multifd_page_count,
DEFAULT_MIGRATE_MULTIFD_PAGE_COUNT),
DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,


@ -22,6 +22,8 @@
#include "hw/qdev.h"
#include "io/channel.h"
struct PostcopyBlocktimeContext;
/* State for the incoming migration */
struct MigrationIncomingState {
QEMUFile *from_src_file;
@ -59,10 +61,20 @@ struct MigrationIncomingState {
/* The coroutine we should enter (back) after failover */
Coroutine *migration_incoming_co;
QemuSemaphore colo_incoming_sem;
/*
 * PostcopyBlocktimeContext to keep information for postcopy
 * live migration, to calculate vCPU block time
 */
struct PostcopyBlocktimeContext *blocktime_ctx;
};
MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
/*
* Functions to work with blocktime context
*/
void fill_destination_postcopy_migration_info(MigrationInfo *info);
#define TYPE_MIGRATION "migration"
@ -90,6 +102,17 @@ struct MigrationState
QEMUBH *cleanup_bh;
QEMUFile *to_dst_file;
/* bytes already sent at the beginning of the current iteration */
uint64_t iteration_initial_bytes;
/* time at the start of current iteration */
int64_t iteration_start_time;
/*
* The final stage happens when the remaining data is smaller than
* this threshold; it's calculated from the requested downtime and
* measured bandwidth
*/
int64_t threshold_size;
/* params from 'migrate-set-parameters' */
MigrationParameters parameters;
@ -103,11 +126,22 @@ struct MigrationState
} rp_state;
double mbps;
/* Timestamp when recent migration starts (ms) */
int64_t start_time;
/* Total time used by latest migration (ms) */
int64_t total_time;
/* Timestamp when VM is down (ms) to migrate the last stuff */
int64_t downtime_start;
int64_t downtime;
int64_t expected_downtime;
bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
int64_t setup_time;
/*
* Whether guest was running when we enter the completion stage.
* If migration is interrupted for any reason, we need to continue
* running the guest on the source.
*/
bool vm_was_running;
/* Flag set once the migration has been asked to enter postcopy */
bool start_postcopy;
@ -201,6 +235,7 @@ int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
bool migrate_postcopy_blocktime(void);
/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,


@ -61,6 +61,101 @@ struct PostcopyDiscardState {
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
typedef struct PostcopyBlocktimeContext {
/* time when page fault initiated per vCPU */
int64_t *page_fault_vcpu_time;
/* page address per vCPU */
uintptr_t *vcpu_addr;
int64_t total_blocktime;
/* blocktime per vCPU */
int64_t *vcpu_blocktime;
/* point in time when last page fault was initiated */
int64_t last_begin;
/* number of vCPUs suspended */
int smp_cpus_down;
/*
* Handler for exit event, necessary for
* releasing whole blocktime_ctx
*/
Notifier exit_notifier;
} PostcopyBlocktimeContext;
static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
g_free(ctx->page_fault_vcpu_time);
g_free(ctx->vcpu_addr);
g_free(ctx->vcpu_blocktime);
g_free(ctx);
}
static void migration_exit_cb(Notifier *n, void *data)
{
PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
exit_notifier);
destroy_blocktime_context(ctx);
}
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
ctx->page_fault_vcpu_time = g_new0(int64_t, smp_cpus);
ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
ctx->vcpu_blocktime = g_new0(int64_t, smp_cpus);
ctx->exit_notifier.notify = migration_exit_cb;
qemu_add_exit_notifier(&ctx->exit_notifier);
return ctx;
}
static int64List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
int64List *list = NULL, *entry = NULL;
int i;
for (i = smp_cpus - 1; i >= 0; i--) {
entry = g_new0(int64List, 1);
entry->value = ctx->vcpu_blocktime[i];
entry->next = list;
list = entry;
}
return list;
}
/*
* This function just populates MigrationInfo from postcopy's
* blocktime context. It will not populate MigrationInfo
* unless the postcopy-blocktime capability was set.
*
* @info: pointer to MigrationInfo to populate
*/
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
if (!bc) {
return;
}
info->has_postcopy_blocktime = true;
info->postcopy_blocktime = bc->total_blocktime;
info->has_postcopy_vcpu_blocktime = true;
info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}
static uint64_t get_postcopy_total_blocktime(void)
{
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
if (!bc) {
return 0;
}
return bc->total_blocktime;
}
/**
* receive_ufd_features: check userfault fd features, to request only supported
@@ -153,6 +248,19 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
}
}
#ifdef UFFD_FEATURE_THREAD_ID
if (migrate_postcopy_blocktime() && mis &&
UFFD_FEATURE_THREAD_ID & supported_features) {
        /* the kernel supports the feature */
        /* create blocktime_ctx only if it does not exist yet */
if (!mis->blocktime_ctx) {
mis->blocktime_ctx = blocktime_context_new();
}
asked_features |= UFFD_FEATURE_THREAD_ID;
}
#endif
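/*
 * Aside (not part of the patch): a minimal standalone sketch of the
 * UFFDIO_API handshake this hunk relies on. With features == 0 the
 * kernel reports the supported feature set; a later UFFDIO_API call on
 * a fresh descriptor can then request UFFD_FEATURE_THREAD_ID. Assumes
 * a Linux host with userfaultfd; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
    /* only one UFFDIO_API call is allowed per descriptor */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api = { .api = UFFD_API, .features = 0 };

    if (ufd < 0) {
        perror("userfaultfd");
        return 1;
    }
    if (ioctl(ufd, UFFDIO_API, &api) != 0) {
        perror("UFFDIO_API");
        close(ufd);
        return 1;
    }
#ifdef UFFD_FEATURE_THREAD_ID
    if (api.features & UFFD_FEATURE_THREAD_ID) {
        printf("UFFD_FEATURE_THREAD_ID supported\n");
    }
#endif
    close(ufd);
    return 0;
}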
/*
     * request features even if asked_features is 0, because the
     * kernel expects UFFD_API before UFFDIO_REGISTER, per
@@ -423,6 +531,9 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
mis->postcopy_tmp_zero_page = NULL;
}
trace_postcopy_ram_incoming_cleanup_blocktime(
get_postcopy_total_blocktime());
trace_postcopy_ram_incoming_cleanup_exit();
return 0;
}
@@ -494,6 +605,142 @@ static int ram_block_enable_notify(const char *block_name, void *host_addr,
return 0;
}
static int get_mem_fault_cpu_index(uint32_t pid)
{
CPUState *cpu_iter;
CPU_FOREACH(cpu_iter) {
if (cpu_iter->thread_id == pid) {
trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
return cpu_iter->cpu_index;
}
}
trace_get_mem_fault_cpu_index(-1, pid);
return -1;
}
/*
 * Called when a page fault occurs; it tracks how long the faulting
 * vCPU stays blocked.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
RAMBlock *rb)
{
int cpu, already_received;
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
int64_t now_ms;
if (!dc || ptid == 0) {
return;
}
cpu = get_mem_fault_cpu_index(ptid);
if (cpu < 0) {
return;
}
now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
if (dc->vcpu_addr[cpu] == 0) {
atomic_inc(&dc->smp_cpus_down);
}
atomic_xchg__nocheck(&dc->last_begin, now_ms);
atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], now_ms);
atomic_xchg__nocheck(&dc->vcpu_addr[cpu], addr);
    /* Check here rather than at the beginning of the function:
     * an earlier check could run before bitmap_set in
     * qemu_ufd_copy_ioctl() and miss an already-received page */
already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
if (already_received) {
atomic_xchg__nocheck(&dc->vcpu_addr[cpu], 0);
atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], 0);
atomic_dec(&dc->smp_cpus_down);
}
trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
cpu, already_received);
}
/*
 * Calculate the blocktime for each affected vCPU and trace it; the
 * overlapped total blocktime is accumulated here as well.
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the event sequence S1,S2,E1,S3,S1,E2,E3,E1.
 * S2..E1 does not satisfy the condition: within S1,S2,E1 CPU3 was never
 * blocked.
 * S3,S1,E2 covers all CPUs, so the overlap S1..E2 is part of the total
 * blocktime.
 * The second S1 is where last_begin points.
 * Legend:
 *   * - blocktime per vCPU
 *   x - overlapped blocktime (counted in the total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
int i, affected_cpu = 0;
int64_t now_ms;
bool vcpu_total_blocktime = false;
int64_t read_vcpu_time;
if (!dc) {
return;
}
now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    /* Look up the vCPU(s) blocked on this address so we can clear them.
     * The linear scan is straightforward but not optimal; a better
     * algorithm would keep a tree or hash where the key is the address
     * and the value is a list of blocked vCPUs */
for (i = 0; i < smp_cpus; i++) {
uint64_t vcpu_blocktime = 0;
read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
read_vcpu_time == 0) {
continue;
}
atomic_xchg__nocheck(&dc->vcpu_addr[i], 0);
vcpu_blocktime = now_ms - read_vcpu_time;
affected_cpu += 1;
        /* We need to know whether mark_postcopy_blocktime_end was called
         * for a faulted page; the other possibility is a prefetched
         * page, and in that case we should not be here */
if (!vcpu_total_blocktime &&
atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
vcpu_total_blocktime = true;
}
        /* continue the loop: one page can affect several vCPUs */
dc->vcpu_blocktime[i] += vcpu_blocktime;
}
atomic_sub(&dc->smp_cpus_down, affected_cpu);
if (vcpu_total_blocktime) {
dc->total_blocktime += now_ms - atomic_fetch_add(&dc->last_begin, 0);
}
trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
affected_cpu);
}
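/*
 * Aside (not part of the patch): a self-contained sketch of the
 * accounting described by the diagram above, using made-up fault
 * intervals (in ms) that roughly match the picture. Per-vCPU blocktime
 * counts every '*' tick; total blocktime counts only the 'x' ticks,
 * where all vCPUs are blocked at once.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS   3
#define NFAULTS 2

typedef struct { int64_t start, end; } Interval;   /* [start, end) */

static const Interval faults[NCPUS][NFAULTS] = {
    { {  5, 15 }, { 28, 43 } },     /* CPU1: two faults */
    { { 12, 31 }, { -1, -1 } },     /* CPU2: one fault  */
    { { 24, 39 }, { -1, -1 } },     /* CPU3: one fault  */
};

int main(void)
{
    int64_t per_cpu[NCPUS] = { 0 };
    int64_t total = 0;

    /* sweep the timeline in 1 ms steps */
    for (int64_t t = 0; t < 50; t++) {
        int down = 0;

        for (int c = 0; c < NCPUS; c++) {
            bool blocked = false;

            for (int i = 0; i < NFAULTS; i++) {
                const Interval *iv = &faults[c][i];
                if (iv->start >= 0 && t >= iv->start && t < iv->end) {
                    blocked = true;
                }
            }
            if (blocked) {
                per_cpu[c]++;
                down++;
            }
        }
        if (down == NCPUS) {
            total++;    /* the 'x' region: every vCPU is blocked */
        }
    }

    for (int c = 0; c < NCPUS; c++) {
        printf("vCPU%d blocktime: %" PRId64 " ms\n", c + 1, per_cpu[c]);
    }
    printf("total blocktime: %" PRId64 " ms\n", total);
    return 0;
}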
/*
* Handle faults detected by the USERFAULT markings
*/
@@ -571,8 +818,11 @@ static void *postcopy_ram_fault_thread(void *opaque)
rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
qemu_ram_get_idstr(rb),
rb_offset);
rb_offset,
msg.arg.pagefault.feat.ptid);
mark_postcopy_blocktime_begin((uintptr_t)(msg.arg.pagefault.address),
msg.arg.pagefault.feat.ptid, rb);
/*
* Send the request to the source - we want to request one
* of our host page sizes (which is >= TPS)
@@ -662,6 +912,8 @@ static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
if (!ret) {
ramblock_recv_bitmap_set_range(rb, host_addr,
pagesize / qemu_target_page_size());
mark_postcopy_blocktime_end((uintptr_t)host_addr);
}
return ret;
}
@@ -759,6 +1011,10 @@ void *postcopy_get_tmp_page(MigrationIncomingState *mis)
#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
error_report("%s: No OS support", __func__);


@@ -237,7 +237,8 @@ static RAMState *ram_state;
uint64_t ram_bytes_remaining(void)
{
return ram_state->migration_dirty_pages * TARGET_PAGE_SIZE;
return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
0;
}
MigrationStats ram_counters;


@@ -172,7 +172,6 @@ static void socket_start_incoming_migration(SocketAddress *saddr,
if (qio_channel_socket_listen_sync(listen_ioc, saddr, errp) < 0) {
object_unref(OBJECT(listen_ioc));
qapi_free_SocketAddress(saddr);
return;
}
@@ -181,7 +180,6 @@ static void socket_start_incoming_migration(SocketAddress *saddr,
socket_accept_incoming_migration,
listen_ioc,
(GDestroyNotify)object_unref);
qapi_free_SocketAddress(saddr);
}
void tcp_start_incoming_migration(const char *host_port, Error **errp)
@@ -191,6 +189,7 @@ void tcp_start_incoming_migration(const char *host_port, Error **errp)
if (!err) {
socket_start_incoming_migration(saddr, &err);
}
qapi_free_SocketAddress(saddr);
error_propagate(errp, err);
}
@@ -198,4 +197,5 @@ void unix_start_incoming_migration(const char *path, Error **errp)
{
SocketAddress *saddr = unix_build_address(path);
socket_start_incoming_migration(saddr, errp);
qapi_free_SocketAddress(saddr);
}


@@ -115,6 +115,8 @@ process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d"
process_incoming_migration_co_postcopy_end_main(void) ""
migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s"
migration_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostname) "ioc=%p ioctype=%s hostname=%s"
mark_postcopy_blocktime_begin(uint64_t addr, void *dd, int64_t time, int cpu, int received) "addr: 0x%" PRIx64 ", dd: %p, time: %" PRId64 ", cpu: %d, already_received: %d"
mark_postcopy_blocktime_end(uint64_t addr, void *dd, int64_t time, int affected_cpu) "addr: 0x%" PRIx64 ", dd: %p, time: %" PRId64 ", affected_cpu: %d"
# migration/rdma.c
qemu_rdma_accept_incoming_migration(void) ""
@@ -191,15 +193,17 @@ postcopy_ram_enable_notify(void) ""
postcopy_ram_fault_thread_entry(void) ""
postcopy_ram_fault_thread_exit(void) ""
postcopy_ram_fault_thread_quit(void) ""
postcopy_ram_fault_thread_request(uint64_t hostaddr, const char *ramblock, size_t offset) "Request for HVA=0x%" PRIx64 " rb=%s offset=0x%zx"
postcopy_ram_fault_thread_request(uint64_t hostaddr, const char *ramblock, size_t offset, uint32_t pid) "Request for HVA=0x%" PRIx64 " rb=%s offset=0x%zx pid=%u"
postcopy_ram_incoming_cleanup_closeuf(void) ""
postcopy_ram_incoming_cleanup_entry(void) ""
postcopy_ram_incoming_cleanup_exit(void) ""
postcopy_ram_incoming_cleanup_join(void) ""
postcopy_ram_incoming_cleanup_blocktime(uint64_t total) "total blocktime %" PRIu64
save_xbzrle_page_skipping(void) ""
save_xbzrle_page_overflow(void) ""
ram_save_iterate_big_wait(uint64_t milliseconds, int iterations) "big wait: %" PRIu64 " milliseconds, %d iterations"
ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
get_mem_fault_cpu_index(int cpu, uint32_t pid) "cpu: %d, pid: %u"
# migration/exec.c
migration_exec_outgoing(const char *cmd) "cmd=%s"


@@ -155,6 +155,13 @@
# @error-desc: the human readable error description string, when
# @status is 'failed'. Clients should not attempt to parse the
# error strings. (Since 2.7)
#
# @postcopy-blocktime: total time when all vCPUs were blocked during postcopy
# live migration (Since 2.12)
#
# @postcopy-vcpu-blocktime: list of the postcopy blocktime per vCPU (Since 2.12)
#
#
# Since: 0.14.0
##
@@ -167,7 +174,9 @@
'*downtime': 'int',
'*setup-time': 'int',
'*cpu-throttle-percentage': 'int',
'*error-desc': 'str'} }
'*error-desc': 'str',
'*postcopy-blocktime' : 'int64',
'*postcopy-vcpu-blocktime': ['int64']} }
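#
# A hypothetical query-migrate reply fragment (illustrative values
# only), showing where the new optional members appear when the
# destination enabled the postcopy-blocktime capability:
#
# <- { "return": { "status": "completed",
#                  "postcopy-blocktime": 3,
#                  "postcopy-vcpu-blocktime": [ 15, 19, 15 ] } }
#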
##
# @query-migrate:
@@ -352,12 +361,16 @@
#
# @x-multifd: Use more than one fd for migration (since 2.11)
#
# @postcopy-blocktime: Calculate downtime for postcopy live migration
# (since 2.12)
#
# Since: 1.2
##
{ 'enum': 'MigrationCapability',
'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram',
'block', 'return-path', 'pause-before-switchover', 'x-multifd' ] }
'block', 'return-path', 'pause-before-switchover', 'x-multifd',
'postcopy-blocktime' ] }
##
# @MigrationCapabilityStatus:
@@ -668,19 +681,19 @@
# Since: 2.4
##
{ 'struct': 'MigrationParameters',
'data': { '*compress-level': 'int',
'*compress-threads': 'int',
'*decompress-threads': 'int',
'*cpu-throttle-initial': 'int',
'*cpu-throttle-increment': 'int',
'data': { '*compress-level': 'uint8',
'*compress-threads': 'uint8',
'*decompress-threads': 'uint8',
'*cpu-throttle-initial': 'uint8',
'*cpu-throttle-increment': 'uint8',
'*tls-creds': 'str',
'*tls-hostname': 'str',
'*max-bandwidth': 'int',
'*downtime-limit': 'int',
'*x-checkpoint-delay': 'int',
'*max-bandwidth': 'size',
'*downtime-limit': 'uint64',
'*x-checkpoint-delay': 'uint32',
'*block-incremental': 'bool' ,
'*x-multifd-channels': 'int',
'*x-multifd-page-count': 'int',
'*x-multifd-channels': 'uint8',
'*x-multifd-page-count': 'uint32',
'*xbzrle-cache-size': 'size' } }
##


@@ -234,6 +234,10 @@ class HTABSection(object):
header = self.file.read32()
if (header == -1):
# "no HPT" encoding
return
if (header > 0):
# First section, just the hash shift
return


@@ -25,6 +25,7 @@
const unsigned start_address = 1024 * 1024;
const unsigned end_address = 100 * 1024 * 1024;
bool got_stop;
static bool uffd_feature_thread_id;
#if defined(__linux__)
#include <sys/syscall.h>
@@ -54,6 +55,7 @@ static bool ufd_version_check(void)
g_test_message("Skipping test: UFFDIO_API failed");
return false;
}
uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID;
ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
(__u64)1 << _UFFDIO_UNREGISTER;
@@ -266,6 +268,16 @@ static uint64_t get_migration_pass(QTestState *who)
return result;
}
static void read_blocktime(QTestState *who)
{
QDict *rsp, *rsp_return;
rsp = wait_command(who, "{ 'execute': 'query-migrate' }");
rsp_return = qdict_get_qdict(rsp, "return");
g_assert(qdict_haskey(rsp_return, "postcopy-blocktime"));
QDECREF(rsp);
}
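/*
 * Aside (not part of the patch): the same pattern could be extended to
 * the per-vCPU list. A hypothetical variant, reusing the helpers above:
 */
static void read_vcpu_blocktime(QTestState *who)
{
    QDict *rsp, *rsp_return;

    rsp = wait_command(who, "{ 'execute': 'query-migrate' }");
    rsp_return = qdict_get_qdict(rsp, "return");
    /* the per-vCPU list is emitted together with the scalar total */
    g_assert(qdict_haskey(rsp_return, "postcopy-vcpu-blocktime"));
    QDECREF(rsp);
}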
static void wait_for_migration_complete(QTestState *who)
{
QDict *rsp, *rsp_return;
@@ -358,13 +370,14 @@ static void migrate_check_parameter(QTestState *who, const char *parameter,
const char *value)
{
QDict *rsp, *rsp_return;
const char *result;
char *result;
rsp = wait_command(who, "{ 'execute': 'query-migrate-parameters' }");
rsp_return = qdict_get_qdict(rsp, "return");
result = g_strdup_printf("%" PRId64,
qdict_get_try_int(rsp_return, parameter, -1));
g_assert_cmpstr(result, ==, value);
g_free(result);
QDECREF(rsp);
}
@@ -524,6 +537,7 @@ static void test_migrate(void)
migrate_set_capability(from, "postcopy-ram", "true");
migrate_set_capability(to, "postcopy-ram", "true");
migrate_set_capability(to, "postcopy-blocktime", "true");
/* We want to pick a speed slow enough that the test completes
* quickly, but that it doesn't complete precopy even on a slow
@@ -552,6 +566,9 @@ static void test_migrate(void)
wait_for_serial("dest_serial");
wait_for_migration_complete(from);
if (uffd_feature_thread_id) {
read_blocktime(to);
}
g_free(uri);
test_migrate_end(from, to);

vl.c

@@ -4792,6 +4792,7 @@ int main(int argc, char **argv, char **envp)
monitor_cleanup();
qemu_chr_cleanup();
user_creatable_cleanup();
migration_object_finalize();
/* TODO: unref root container, check all devices are ok */
return 0;