migration/: fix some comment spelling errors

I found that there are many spelling errors in the comments of QEMU,
so I ran a spellcheck tool over the tree; this patch fixes the
spelling errors it found in the migration folder.

Signed-off-by: zhaolichang <zhaolichang@huawei.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20200917075029.313-3-zhaolichang@huawei.com>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
commit 3a4452d896
parent e3a6e0daf4
Author: zhaolichang, 2020-09-17 15:50:21 +08:00 (committed by Laurent Vivier)
8 changed files with 18 additions and 18 deletions

migration/colo-failover.c

@@ -46,7 +46,7 @@ void failover_request_active(Error **errp)
 {
     if (failover_set_state(FAILOVER_STATUS_NONE,
         FAILOVER_STATUS_REQUIRE) != FAILOVER_STATUS_NONE) {
-        error_setg(errp, "COLO failover is already actived");
+        error_setg(errp, "COLO failover is already activated");
         return;
     }
     failover_bh = qemu_bh_new(colo_failover_bh, NULL);

migration/colo.c

@@ -632,7 +632,7 @@ out:
     /*
      * It is safe to unregister notifier after failover finished.
      * Besides, colo_delay_timer and colo_checkpoint_sem can't be
-     * released befor unregister notifier, or there will be use-after-free
+     * released before unregister notifier, or there will be use-after-free
      * error.
      */
     colo_compare_unregister_notifier(&packets_compare_notifier);

migration/multifd.c

@@ -731,7 +731,7 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
         qemu_sem_post(&p->sem_sync);
         /*
          * Although multifd_send_thread is not created, but main migration
-         * thread neet to judge whether it is running, so we need to mark
+         * thread needs to judge whether it is running, so we need to mark
          * its status.
          */
         p->quit = true;
@@ -1042,7 +1042,7 @@ bool multifd_recv_all_channels_created(void)
 /*
  * Try to receive all multifd channels to get ready for the migration.
- * - Return true and do not set @errp when correctly receving all channels;
+ * - Return true and do not set @errp when correctly receiving all channels;
  * - Return false and do not set @errp when correctly receiving the current one;
  * - Return false and set @errp when failing to receive the current channel.
  */

migration/postcopy-ram.c

@@ -237,7 +237,7 @@ release_ufd:
  * request_ufd_features: this function should be called only once on a newly
  * opened ufd, subsequent calls will lead to error.
  *
- * Returns: true on succes
+ * Returns: true on success
  *
  * @ufd: fd obtained from userfaultfd syscall
  * @features: bit mask see UFFD_API_FEATURES
@@ -807,7 +807,7 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
     low_time_offset = get_low_time_offset(dc);
     /* lookup cpu, to clear it,
-     * that algorithm looks straighforward, but it's not
+     * that algorithm looks straightforward, but it's not
      * optimal, more optimal algorithm is keeping tree or hash
      * where key is address value is a list of */
     for (i = 0; i < smp_cpus; i++) {

migration/postcopy-ram.h

@@ -161,7 +161,7 @@ struct PostCopyFD {
  */
 void postcopy_register_shared_ufd(struct PostCopyFD *pcfd);
 void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd);
-/* Call each of the shared 'waker's registerd telling them of
+/* Call each of the shared 'waker's registered telling them of
  * availability of a block.
  */
 int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset);

migration/ram.c

@@ -256,7 +256,7 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
     /*
      * Always use little endian when sending the bitmap. This is
      * required that when source and destination VMs are not using the
-     * same endianess. (Note: big endian won't work.)
+     * same endianness. (Note: big endian won't work.)
      */
     bitmap_to_le(le_bitmap, block->receivedmap, nbits);
@@ -275,7 +275,7 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
     qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
     /*
      * Mark as an end, in case the middle part is screwed up due to
-     * some "misterious" reason.
+     * some "mysterious" reason.
      */
     qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
     qemu_fflush(file);
@@ -718,7 +718,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     /*
      * Reaching here means the page has hit the xbzrle cache, no matter what
      * encoding result it is (normal encoding, overflow or skipping the page),
-     * count the page as encoded. This is used to caculate the encoding rate.
+     * count the page as encoded. This is used to calculate the encoding rate.
      *
      * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
      * 2nd page turns out to be skipped (i.e. no new bytes written to the
@@ -3705,7 +3705,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
     /*
      * Note: see comments in ramblock_recv_bitmap_send() on why we
-     * need the endianess convertion, and the paddings.
+     * need the endianness conversion, and the paddings.
      */
     local_size = ROUND_UP(local_size, 8);
@@ -3743,7 +3743,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
     }
     /*
-     * Endianess convertion. We are during postcopy (though paused).
+     * Endianness conversion. We are during postcopy (though paused).
      * The dirty bitmap won't change. We can directly modify it.
      */
     bitmap_from_le(block->bmap, le_bitmap, nbits);

migration/rdma.c

@@ -1511,7 +1511,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
     } else {
         /* This is the source side, we're in a separate thread
          * or destination prior to migration_fd_process_incoming()
-         * after postcopy, the destination also in a seprate thread.
+         * after postcopy, the destination also in a separate thread.
          * we can't yield; so we have to poll the fd.
         * But we need to be able to handle 'cancel' or an error
         * without hanging forever.
@@ -2268,7 +2268,7 @@ static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
  *    chunk, then start a new chunk and flush() the old chunk.
  * 3. To keep the hardware busy, we also group chunks into batches
  *    and only require that a batch gets acknowledged in the completion
- *    qeueue instead of each individual chunk.
+ *    queue instead of each individual chunk.
  */
 static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
                            uint64_t block_offset, uint64_t offset,
@@ -3150,7 +3150,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
     if (size > 0) {
         /*
          * Add this page to the current 'chunk'. If the chunk
-         * is full, or the page doen't belong to the current chunk,
+         * is full, or the page doesn't belong to the current chunk,
          * an actual RDMA write will occur and a new chunk will be formed.
          */
         ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
@@ -4103,7 +4103,7 @@ void rdma_start_outgoing_migration(void *opaque,
         goto err;
     }
-    /* RDMA postcopy need a seprate queue pair for return path */
+    /* RDMA postcopy need a separate queue pair for return path */
     if (migrate_postcopy()) {
         rdma_return_path = qemu_rdma_data_init(host_port, errp);

migration/savevm.c

@@ -2795,7 +2795,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
     if (!has_live) {
         /* live default to true so old version of Xen tool stack can have a
-         * successfull live migration */
+         * successful live migration */
         live = true;
     }
@@ -2818,7 +2818,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
      * "xen-save-devices-state" and in case of migration failure, libxl
      * would call "cont".
      * So call bdrv_inactivate_all (release locks) here to let the other
-     * side of the migration take controle of the images.
+     * side of the migration take control of the images.
      */
     if (live && !saved_vm_running) {
         ret = bdrv_inactivate_all();