memory: allow creating MemoryRegions before accelerators
Compute the DIRTY_MEMORY_CODE bit in memory_region_get_dirty_log_mask
instead of memory_region_init_*.  This makes it possible to allocate
memory backend objects at any time.

Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 0a2949e0be
parent 0572f558cb
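For reference, a sketch of how memory_region_get_dirty_log_mask() reads once the
patch is applied, reconstructed from the hunk below; the mask/rb declarations and
the first line of the global_dirty_log condition are assumed from surrounding
context rather than taken from this diff:

    uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
    {
        uint8_t mask = mr->dirty_log_mask;   /* static bits set at init time */
        RAMBlock *rb = mr->ram_block;

        if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) ||
                                 memory_region_is_iommu(mr))) {
            mask |= (1 << DIRTY_MEMORY_MIGRATION);
        }

        /* Added by this patch: the CODE bit is derived at query time, so a
         * region created before the accelerator exists still reports it
         * correctly under TCG. */
        if (tcg_enabled() && rb) {
            /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
            mask |= (1 << DIRTY_MEMORY_CODE);
        }
        return mask;
    }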
@@ -1548,7 +1548,6 @@ void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
     mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
     if (err) {
         mr->size = int128_zero();
         object_unparent(OBJECT(mr));
@@ -1573,7 +1572,6 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
     mr->destructor = memory_region_destructor_ram;
     mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                               mr, &err);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
     if (err) {
         mr->size = int128_zero();
         object_unparent(OBJECT(mr));
@@ -1598,7 +1596,6 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
     mr->destructor = memory_region_destructor_ram;
     mr->align = align;
     mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
     if (err) {
         mr->size = int128_zero();
         object_unparent(OBJECT(mr));
@@ -1622,7 +1619,6 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr,
     mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                            share ? RAM_SHARED : 0,
                                            fd, &err);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
     if (err) {
         mr->size = int128_zero();
         object_unparent(OBJECT(mr));
@@ -1641,7 +1637,6 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 
     /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
     assert(ptr != NULL);
@@ -1661,7 +1656,7 @@ void memory_region_init_ram_device_ptr(MemoryRegion *mr,
     mr->ops = &ram_device_mem_ops;
     mr->opaque = mr;
     mr->destructor = memory_region_destructor_ram;
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+
     /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
     assert(ptr != NULL);
     mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
@@ -1819,6 +1814,11 @@ uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
                              memory_region_is_iommu(mr))) {
         mask |= (1 << DIRTY_MEMORY_MIGRATION);
     }
+
+    if (tcg_enabled() && rb) {
+        /* TCG only cares about dirty memory logging for RAM, not IOMMU.  */
+        mask |= (1 << DIRTY_MEMORY_CODE);
+    }
     return mask;
 }
 
@@ -1714,11 +1714,7 @@ static bool object_create_early(const char *type, QemuOpts *opts)
         return false;
     }
 
-    /* Memory allocation by backends needs to be done
-     * after configure_accelerator() (due to the tcg_enabled()
-     * checks at memory_region_init_*()).
-     *
-     * Also, allocation of large amounts of memory may delay
+    /* Allocation of large amounts of memory may delay
      * chardev initialization for too long, and trigger timeouts
      * on software that waits for a monitor socket to be created
      * (e.g. libvirt).
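A minimal ordering sketch of what this enables, assuming a QEMU-internal caller;
the function names, the "backend-ram" name and the 256 MiB size are illustrative,
not part of this patch:

    #include "qemu/osdep.h"
    #include "qemu/units.h"
    #include "exec/memory.h"
    #include "qapi/error.h"

    static MemoryRegion backend_ram;    /* hypothetical backend-owned region */

    /* May run before configure_accelerator(): tcg_enabled() can still be
     * false here, which is now harmless because no dirty-log bit is frozen
     * into the region at init time. */
    static void create_backend_early(void)
    {
        memory_region_init_ram_nomigrate(&backend_ram, NULL, "backend-ram",
                                         256 * MiB, &error_fatal);
    }

    /* Called after configure_accelerator(): DIRTY_MEMORY_CODE shows up in
     * the mask if and only if TCG was actually selected. */
    static uint8_t query_after_accel_setup(void)
    {
        return memory_region_get_dirty_log_mask(&backend_ram);
    }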