Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* RAMBlock vs. MemoryRegion cleanups from Fam
* mru_section optimization from Fam
* memory.txt improvements from Peter and Xiaoqiang
* i8257 fix from Hervé
* -daemonize fix
* Cleanups and small fixes from Alex, Pranith, Wei

# gpg: Signature made Mon 07 Mar 2016 17:08:59 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream:
  scsi-bus: Remove tape command from scsi_req_xfer
  kvm/irqchip: use bitmap utility for gsi tracking
  MAINTAINERS: Add entry for include/sysemu/kvm*.h
  doc/memory.txt: correct description of MemoryRegionOps fields
  doc/memory.txt: correct a logic error
  icount: possible options for sleep are on or off
  exec: Introduce AddressSpaceDispatch.mru_section
  exec: Factor out section_covers_addr
  exec: Pass RAMBlock pointer to qemu_ram_free
  memory: Drop MemoryRegion.ram_addr
  memory: Implement memory_region_get_ram_addr with mr->ram_block
  memory: Move assignment to ram_block to memory_region_init_*
  exec: Return RAMBlock pointer from allocating functions
  i8257: fix Terminal Count status
  log: do not log if QEMU is daemonized but without -D

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 97556fe80e
Peter Maydell, 2016-03-08 04:53:36 +00:00
15 changed files with 144 additions and 138 deletions

MAINTAINERS
@@ -234,6 +234,7 @@ L: kvm@vger.kernel.org
 S: Supported
 F: kvm-*
 F: */kvm.*
+F: include/sysemu/kvm*.h
 
 ARM
 M: Peter Maydell <peter.maydell@linaro.org>

cpus.c

@@ -630,7 +630,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
     icount_align_option = qemu_opt_get_bool(opts, "align", false);
 
     if (icount_align_option && !icount_sleep) {
-        error_setg(errp, "align=on and sleep=no are incompatible");
+        error_setg(errp, "align=on and sleep=off are incompatible");
     }
     if (strcmp(option, "auto") != 0) {
         errno = 0;
@@ -643,7 +643,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
     } else if (icount_align_option) {
         error_setg(errp, "shift=auto and align=on are incompatible");
     } else if (!icount_sleep) {
-        error_setg(errp, "shift=auto and sleep=no are incompatible");
+        error_setg(errp, "shift=auto and sleep=off are incompatible");
     }
     use_icount = 2;

cputlb.c

@@ -416,8 +416,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         /* Write access calls the I/O callback. */
         te->addr_write = address | TLB_MMIO;
     } else if (memory_region_is_ram(section->mr)
-               && cpu_physical_memory_is_clean(section->mr->ram_addr
-                                               + xlat)) {
+               && cpu_physical_memory_is_clean(
+                   memory_region_get_ram_addr(section->mr) + xlat)) {
         te->addr_write = address | TLB_NOTDIRTY;
     } else {
         te->addr_write = address;

docs/memory.txt

@@ -180,8 +180,8 @@
 aliases that leave holes then the lower priority region will appear in these
 holes too.)
 
 For example, suppose we have a container A of size 0x8000 with two subregions
-B and C. B is a container mapped at 0x2000, size 0x4000, priority 1; C is
-an MMIO region mapped at 0x0, size 0x6000, priority 2. B currently has two
+B and C. B is a container mapped at 0x2000, size 0x4000, priority 2; C is
+an MMIO region mapped at 0x0, size 0x6000, priority 1. B currently has two
 of its own subregions: D of size 0x1000 at offset 0 and E of size 0x1000 at
 offset 0x2000. As a diagram:
@@ -297,8 +297,9 @@ various constraints can be supplied to control how these callbacks are called:
 - .valid.min_access_size, .valid.max_access_size define the access sizes
   (in bytes) which the device accepts; accesses outside this range will
   have device and bus specific behaviour (ignored, or machine check)
-- .valid.aligned specifies that the device only accepts naturally aligned
-  accesses. Unaligned accesses invoke device and bus specific behaviour.
+- .valid.unaligned specifies that the *device being modelled* supports
+  unaligned accesses; if false, unaligned accesses will invoke the
+  appropriate bus or CPU specific behaviour.
 - .impl.min_access_size, .impl.max_access_size define the access sizes
   (in bytes) supported by the *implementation*; other access sizes will be
   emulated using the ones available. For example a 4-byte write will be
@@ -306,5 +307,5 @@ various constraints can be supplied to control how these callbacks are called:
 - .impl.unaligned specifies that the *implementation* supports unaligned
   accesses; if false, unaligned accesses will be emulated by two aligned
   accesses.
-- .old_mmio can be used to ease porting from code using
+- .old_mmio eases the porting of code that was formerly using
   cpu_register_io_memory(). It should not be used in new code.
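
To make the corrected .valid/.impl semantics concrete, here is a minimal sketch of a MemoryRegionOps definition; the device, its callbacks, and the register layout are hypothetical and not part of this series. The guest may issue aligned 1-4 byte accesses (.valid), while the callbacks only ever see 4-byte accesses, with narrower ones emulated as .impl describes:

    /* Hypothetical MMIO register bank, sketch only. */
    #include "qemu/osdep.h"
    #include "exec/memory.h"

    static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
    {
        uint32_t *regs = opaque;      /* device state passed at init */
        return regs[addr >> 2];       /* .impl guarantees size == 4 here */
    }

    static void demo_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
    {
        uint32_t *regs = opaque;
        regs[addr >> 2] = val;
    }

    static const MemoryRegionOps demo_ops = {
        .read = demo_read,
        .write = demo_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .valid = {
            .min_access_size = 1,     /* what the guest may issue */
            .max_access_size = 4,
            .unaligned = false,       /* unaligned -> bus/CPU specific */
        },
        .impl = {
            .min_access_size = 4,     /* what the callbacks will see */
            .max_access_size = 4,
        },
    };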

exec.c

@@ -135,6 +135,7 @@ typedef struct PhysPageMap {
 
 struct AddressSpaceDispatch {
     struct rcu_head rcu;
+    MemoryRegionSection *mru_section;
     /* This is a multi-level map on the physical address space.
      * The bottom level has pointers to MemoryRegionSections.
      */
@@ -307,6 +308,17 @@ static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
     }
 }
 
+static inline bool section_covers_addr(const MemoryRegionSection *section,
+                                       hwaddr addr)
+{
+    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
+     * the section must cover the entire address space.
+     */
+    return section->size.hi ||
+           range_covers_byte(section->offset_within_address_space,
+                             section->size.lo, addr);
+}
+
 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                            Node *nodes, MemoryRegionSection *sections)
 {
@@ -322,9 +334,7 @@ static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
         lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
     }
 
-    if (sections[lp.ptr].size.hi ||
-        range_covers_byte(sections[lp.ptr].offset_within_address_space,
-                          sections[lp.ptr].size.lo, addr)) {
+    if (section_covers_addr(&sections[lp.ptr], addr)) {
         return &sections[lp.ptr];
     } else {
         return &sections[PHYS_SECTION_UNASSIGNED];
@@ -342,14 +352,25 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         hwaddr addr,
                                                         bool resolve_subpage)
 {
-    MemoryRegionSection *section;
+    MemoryRegionSection *section = atomic_read(&d->mru_section);
     subpage_t *subpage;
+    bool update;
 
-    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
+    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
+        section_covers_addr(section, addr)) {
+        update = false;
+    } else {
+        section = phys_page_find(d->phys_map, addr, d->map.nodes,
+                                 d->map.sections);
+        update = true;
+    }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
     }
+    if (update) {
+        atomic_set(&d->mru_section, section);
+    }
     return section;
 }
@@ -1554,7 +1575,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
     }
 }
 
-static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
+static void ram_block_add(RAMBlock *new_block, Error **errp)
 {
     RAMBlock *block;
     RAMBlock *last_block = NULL;
@@ -1573,7 +1594,6 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
         if (err) {
             error_propagate(errp, err);
             qemu_mutex_unlock_ramlist();
-            return -1;
         }
     } else {
         new_block->host = phys_mem_alloc(new_block->max_length,
@@ -1583,7 +1603,6 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                              "cannot set up guest memory '%s'",
                              memory_region_name(new_block->mr));
             qemu_mutex_unlock_ramlist();
-            return -1;
         }
         memory_try_enable_merging(new_block->host, new_block->max_length);
     }
@@ -1631,22 +1650,19 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
             kvm_setup_guest_memory(new_block->host, new_block->max_length);
         }
     }
-
-    return new_block->offset;
 }
 
 #ifdef __linux__
-ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
-                                    bool share, const char *mem_path,
-                                    Error **errp)
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+                                   bool share, const char *mem_path,
+                                   Error **errp)
 {
     RAMBlock *new_block;
-    ram_addr_t addr;
     Error *local_err = NULL;
 
     if (xen_enabled()) {
         error_setg(errp, "-mem-path not supported with Xen");
-        return -1;
+        return NULL;
     }
 
     if (phys_mem_alloc != qemu_anon_ram_alloc) {
@@ -1657,7 +1673,7 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
          */
         error_setg(errp,
                    "-mem-path not supported with this accelerator");
-        return -1;
+        return NULL;
     }
 
     size = HOST_PAGE_ALIGN(size);
@@ -1670,29 +1686,28 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                      mem_path, errp);
     if (!new_block->host) {
         g_free(new_block);
-        return -1;
+        return NULL;
     }
 
-    addr = ram_block_add(new_block, &local_err);
+    ram_block_add(new_block, &local_err);
     if (local_err) {
         g_free(new_block);
         error_propagate(errp, local_err);
-        return -1;
+        return NULL;
     }
-
-    return addr;
+    return new_block;
 }
 #endif
 
 static
-ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
-                                   void (*resized)(const char*,
-                                                   uint64_t length,
-                                                   void *host),
-                                   void *host, bool resizeable,
-                                   MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
+                                  void (*resized)(const char*,
+                                                  uint64_t length,
+                                                  void *host),
+                                  void *host, bool resizeable,
+                                  MemoryRegion *mr, Error **errp)
 {
     RAMBlock *new_block;
-    ram_addr_t addr;
     Error *local_err = NULL;
 
     size = HOST_PAGE_ALIGN(size);
@@ -1711,29 +1726,27 @@ ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
     if (resizeable) {
         new_block->flags |= RAM_RESIZEABLE;
     }
-    addr = ram_block_add(new_block, &local_err);
+    ram_block_add(new_block, &local_err);
     if (local_err) {
         g_free(new_block);
         error_propagate(errp, local_err);
-        return -1;
+        return NULL;
     }
-
-    mr->ram_block = new_block;
-    return addr;
+    return new_block;
 }
 
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                    MemoryRegion *mr, Error **errp)
 {
     return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
 }
 
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
 {
     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
 }
 
-ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                      void (*resized)(const char*,
                                                      uint64_t length,
                                                      void *host),
@@ -1759,22 +1772,15 @@ static void reclaim_ramblock(RAMBlock *block)
     g_free(block);
 }
 
-void qemu_ram_free(ram_addr_t addr)
+void qemu_ram_free(RAMBlock *block)
 {
-    RAMBlock *block;
-
     qemu_mutex_lock_ramlist();
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        if (addr == block->offset) {
-            QLIST_REMOVE_RCU(block, next);
-            ram_list.mru_block = NULL;
-            /* Write list before version */
-            smp_wmb();
-            ram_list.version++;
-            call_rcu(block, reclaim_ramblock, rcu);
-            break;
-        }
-    }
+    QLIST_REMOVE_RCU(block, next);
+    ram_list.mru_block = NULL;
+    /* Write list before version */
+    smp_wmb();
+    ram_list.version++;
+    call_rcu(block, reclaim_ramblock, rcu);
     qemu_mutex_unlock_ramlist();
 }
@@ -2707,7 +2713,8 @@ MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
             }
         } else {
             /* RAM case */
-            ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr + addr1);
+            ptr = qemu_get_ram_ptr(mr->ram_block,
+                                   memory_region_get_ram_addr(mr) + addr1);
             memcpy(buf, ptr, l);
         }
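
The mru_section change above is a single-entry most-recently-used cache placed in front of the multi-level page-map walk, exploiting the locality of repeated accesses to the same section. A simplified sketch of the pattern, with hypothetical types and without the RCU/atomic accessors the real code needs for concurrent readers:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct Section { uint64_t base, size; } Section;
    typedef struct Dispatch {
        Section *mru_section;           /* last section handed out */
        /* ...multi-level map omitted... */
    } Dispatch;

    Section *slow_lookup(Dispatch *d, uint64_t addr);   /* full map walk */

    static bool covers(const Section *s, uint64_t addr)
    {
        return addr - s->base < s->size;
    }

    Section *lookup(Dispatch *d, uint64_t addr)
    {
        Section *s = d->mru_section;
        if (s && covers(s, addr)) {
            return s;                   /* fast path: cache hit */
        }
        s = slow_lookup(d, addr);       /* slow path: walk the map */
        d->mru_section = s;             /* remember for the next lookup */
        return s;
    }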

hw/dma/i8257.c

@@ -342,6 +342,10 @@ static void i8257_channel_run(I8257State *d, int ichan)
             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
     r->now[COUNT] = n;
     ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
+    if (n == (r->base[COUNT] + 1) << ncont) {
+        ldebug("transfer done\n");
+        d->status |= (1 << ichan);
+    }
 }
 
 static void i8257_dma_run(void *opaque)

hw/misc/ivshmem.c

@@ -400,7 +400,7 @@ static int create_shared_memory_BAR(IVShmemState *s, int fd, uint8_t attr,
 
     memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
                                s->ivshmem_size, ptr);
-    qemu_set_ram_fd(s->ivshmem.ram_addr, fd);
+    qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem), fd);
     vmstate_register_ram(&s->ivshmem, DEVICE(s));
     memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
@@ -661,7 +661,8 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
     }
 
     memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                "ivshmem.bar2", s->ivshmem_size, map_ptr);
-    qemu_set_ram_fd(s->ivshmem.ram_addr, incoming_fd);
+    qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem),
+                    incoming_fd);
     vmstate_register_ram(&s->ivshmem, DEVICE(s));
 
     IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
@@ -996,8 +997,10 @@ static void pci_ivshmem_exit(PCIDevice *dev)
                          strerror(errno));
             }
 
-            if ((fd = qemu_get_ram_fd(s->ivshmem.ram_addr)) != -1)
+            fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
+            if (fd != -1) {
                 close(fd);
+            }
         }
 
         vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));

hw/scsi/scsi-bus.c

@@ -989,7 +989,6 @@ static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
         }
         /* fall through */
     case READ_10:
-    case RECOVER_BUFFERED_DATA:
    case READ_12:
    case READ_16:
         cmd->xfer *= dev->blocksize;

include/exec/memory.h

@@ -169,7 +169,6 @@ struct MemoryRegion {
     bool flush_coalesced_mmio;
     bool global_locking;
     uint8_t dirty_log_mask;
-    ram_addr_t ram_addr;
     RAMBlock *ram_block;
     Object *owner;
     const MemoryRegionIOMMUOps *iommu_ops;
@@ -978,14 +977,8 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
 /**
  * memory_region_get_ram_addr: Get the ram address associated with a memory
  *                             region
- *
- * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
- * code is being reworked.
  */
-static inline ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
-{
-    return mr->ram_addr;
-}
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
 
 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
 /**

include/exec/ram_addr.h

@@ -94,21 +94,21 @@
 ram_addr_t last_ram_offset(void);
 void qemu_mutex_lock_ramlist(void);
 void qemu_mutex_unlock_ramlist(void);
-ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
-                                    bool share, const char *mem_path,
-                                    Error **errp);
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
-                                   MemoryRegion *mr, Error **errp);
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
-ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
-                                     void (*resized)(const char*,
-                                                     uint64_t length,
-                                                     void *host),
-                                     MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+                                   bool share, const char *mem_path,
+                                   Error **errp);
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+                                  MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+                                    void (*resized)(const char*,
+                                                    uint64_t length,
+                                                    void *host),
+                                    MemoryRegion *mr, Error **errp);
 int qemu_get_ram_fd(ram_addr_t addr);
 void qemu_set_ram_fd(ram_addr_t addr, int fd);
 void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
-void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_free(RAMBlock *block);
 
 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

kvm-all.c

@@ -89,7 +89,7 @@ struct KVMState
 #ifdef KVM_CAP_IRQ_ROUTING
     struct kvm_irq_routing *irq_routes;
     int nr_allocated_irq_routes;
-    uint32_t *used_gsi_bitmap;
+    unsigned long *used_gsi_bitmap;
     unsigned int gsi_count;
     QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 #endif
@@ -366,7 +366,8 @@ static void kvm_log_stop(MemoryListener *listener,
 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                          unsigned long *bitmap)
 {
-    ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
+    ram_addr_t start = section->offset_within_region +
+                       memory_region_get_ram_addr(section->mr);
     ram_addr_t pages = int128_get64(section->size) / getpagesize();
 
     cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
@@ -950,12 +951,12 @@ typedef struct KVMMSIRoute {
 
 static void set_gsi(KVMState *s, unsigned int gsi)
 {
-    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
+    set_bit(gsi, s->used_gsi_bitmap);
 }
 
 static void clear_gsi(KVMState *s, unsigned int gsi)
 {
-    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
+    clear_bit(gsi, s->used_gsi_bitmap);
 }
 
 void kvm_init_irq_routing(KVMState *s)
@@ -964,17 +965,9 @@ void kvm_init_irq_routing(KVMState *s)
 
     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
     if (gsi_count > 0) {
-        unsigned int gsi_bits, i;
-
-        /* Round up so we can search ints using ffs */
-        gsi_bits = ALIGN(gsi_count, 32);
-        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
+        s->used_gsi_bitmap = bitmap_new(gsi_count);
         s->gsi_count = gsi_count;
-
-        /* Mark any over-allocated bits as already in use */
-        for (i = gsi_count; i < gsi_bits; i++) {
-            set_gsi(s, i);
-        }
     }
 
     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
@@ -1104,9 +1097,7 @@ static void kvm_flush_dynamic_msi_routes(KVMState *s)
 
 static int kvm_irqchip_get_virq(KVMState *s)
 {
-    uint32_t *word = s->used_gsi_bitmap;
-    int max_words = ALIGN(s->gsi_count, 32) / 32;
-    int i, zeroes;
+    int next_virq;
 
     /*
      * PIC and IOAPIC share the first 16 GSI numbers, thus the available
@@ -1119,16 +1110,12 @@ static int kvm_irqchip_get_virq(KVMState *s)
     }
 
     /* Return the lowest unused GSI in the bitmap */
-    for (i = 0; i < max_words; i++) {
-        zeroes = ctz32(~word[i]);
-        if (zeroes == 32) {
-            continue;
-        }
-
-        return zeroes + i * 32;
-    }
-    return -ENOSPC;
+    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
+    if (next_virq >= s->gsi_count) {
+        return -ENOSPC;
+    } else {
+        return next_virq;
+    }
 }
 
 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
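
The kvm-all.c conversion replaces hand-rolled uint32_t word arithmetic with the generic helpers from qemu/bitmap.h, which also removes the need to pad and pre-mark over-allocated bits. A small sketch of the calls involved; the GSI numbers are made up for illustration:

    #include "qemu/osdep.h"
    #include "qemu/bitmap.h"

    static int demo_gsi_alloc(void)
    {
        unsigned int gsi_count = 24;
        unsigned long *used = bitmap_new(gsi_count);   /* zero-initialised */

        set_bit(3, used);               /* mark GSI 3 as in use */
        set_bit(4, used);
        clear_bit(3, used);             /* release GSI 3 again */

        /* Lowest unused GSI; returns >= gsi_count when the bitmap is
         * full, which maps to the -ENOSPC case above. */
        unsigned long next = find_first_zero_bit(used, gsi_count);

        g_free(used);
        return next < gsi_count ? (int)next : -ENOSPC;
    }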
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)

memory.c

@@ -902,12 +902,12 @@ static void memory_region_destructor_none(MemoryRegion *mr)
 
 static void memory_region_destructor_ram(MemoryRegion *mr)
 {
-    qemu_ram_free(mr->ram_addr);
+    qemu_ram_free(mr->ram_block);
 }
 
 static void memory_region_destructor_rom_device(MemoryRegion *mr)
 {
-    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
+    qemu_ram_free(mr->ram_block);
 }
 
 static bool memory_region_need_escape(char c)
@@ -1038,7 +1038,6 @@ static void memory_region_initfn(Object *obj)
     ObjectProperty *op;
 
     mr->ops = &unassigned_mem_ops;
-    mr->ram_addr = RAM_ADDR_INVALID;
     mr->enabled = true;
     mr->romd_mode = true;
     mr->global_locking = true;
@@ -1274,7 +1273,7 @@ void memory_region_init_ram(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, mr, errp);
     mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
@@ -1292,7 +1291,8 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
+    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
+                                              mr, errp);
     mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
@@ -1309,7 +1309,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
+    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
     mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
 #endif
@@ -1328,7 +1328,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
 
     /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
     assert(ptr != NULL);
-    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
+    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
 }
 
 void memory_region_set_skip_dump(MemoryRegion *mr)
@@ -1362,7 +1362,7 @@ void memory_region_init_rom_device(MemoryRegion *mr,
     mr->terminates = true;
     mr->rom_device = true;
     mr->destructor = memory_region_destructor_rom_device;
-    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, mr, errp);
 }
 
 void memory_region_init_iommu(MemoryRegion *mr,
@@ -1527,24 +1527,26 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                              hwaddr size, unsigned client)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
+    assert(mr->ram_block);
+    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
+                                         size, client);
 }
 
 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                              hwaddr size)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size,
+    assert(mr->ram_block);
+    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+                                        size,
                                         memory_region_get_dirty_log_mask(mr));
 }
 
 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                         hwaddr size, unsigned client)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    return cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr,
-                                                    size, client);
+    assert(mr->ram_block);
+    return cpu_physical_memory_test_and_clear_dirty(
+                memory_region_get_ram_addr(mr) + addr, size, client);
 }
@@ -1587,9 +1589,9 @@ void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                                hwaddr size, unsigned client)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, size,
-                                             client);
+    assert(mr->ram_block);
+    cpu_physical_memory_test_and_clear_dirty(
+                memory_region_get_ram_addr(mr) + addr, size, client);
 }
 
 int memory_region_get_fd(MemoryRegion *mr)
@@ -1598,9 +1600,9 @@ int memory_region_get_fd(MemoryRegion *mr)
         return memory_region_get_fd(mr->alias);
     }
 
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
+    assert(mr->ram_block);
 
-    return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK);
+    return qemu_get_ram_fd(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
 }
 
 void *memory_region_get_ram_ptr(MemoryRegion *mr)
@@ -1613,18 +1615,24 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr)
         offset += mr->alias_offset;
         mr = mr->alias;
     }
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr & TARGET_PAGE_MASK);
+    assert(mr->ram_block);
+    ptr = qemu_get_ram_ptr(mr->ram_block,
+                           memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
     rcu_read_unlock();
 
     return ptr + offset;
 }
 
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
+{
+    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
+}
+
 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
+    assert(mr->ram_block);
 
-    qemu_ram_resize(mr->ram_addr, newsize, errp);
+    qemu_ram_resize(memory_region_get_ram_addr(mr), newsize, errp);
 }
 
 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)

qemu-options.hx

@@ -3276,7 +3276,7 @@ re-inject them.
 ETEXI
 
 DEF("icount", HAS_ARG, QEMU_OPTION_icount, \
-    "-icount [shift=N|auto][,align=on|off][,sleep=no,rr=record|replay,rrfile=<filename>]\n" \
+    "-icount [shift=N|auto][,align=on|off][,sleep=on|off,rr=record|replay,rrfile=<filename>]\n" \
     "                enable virtual instruction counter with 2^N clock ticks per\n" \
     "                instruction, enable aligning the host and virtual clocks\n" \
     "                or disable real time cpu sleeping\n", QEMU_ARCH_ALL)
@@ -3289,8 +3289,8 @@ then the virtual cpu speed will be automatically adjusted to keep virtual
 time within a few seconds of real time.
 
 When the virtual cpu is sleeping, the virtual time will advance at default
-speed unless @option{sleep=no} is specified.
-With @option{sleep=no}, the virtual time will jump to the next timer deadline
+speed unless @option{sleep=on|off} is specified.
+With @option{sleep=on|off}, the virtual time will jump to the next timer deadline
 instantly whenever the virtual cpu goes to sleep mode and will not advance
 if no timer is enabled. This behavior give deterministic execution times from
 the guest point of view.

scripts/dump-guest-memory.py

@@ -352,7 +352,7 @@ def memory_region_get_ram_ptr(memory_region):
         return (memory_region_get_ram_ptr(memory_region["alias"].dereference())
                 + memory_region["alias_offset"])
 
-    return qemu_get_ram_ptr(memory_region["ram_addr"] & TARGET_PAGE_MASK)
+    return qemu_get_ram_ptr(memory_region["ram_block"]["offset"])
 
 def get_guest_phys_blocks():

util/log.c

@@ -56,7 +56,8 @@ void do_qemu_set_log(int log_flags, bool use_own_buffers)
 #ifdef CONFIG_TRACE_LOG
     qemu_loglevel |= LOG_TRACE;
 #endif
-    if ((qemu_loglevel || is_daemonized()) && !qemu_logfile) {
+    if (!qemu_logfile &&
+        (is_daemonized() ? logfilename != NULL : qemu_loglevel)) {
         if (logfilename) {
             qemu_logfile = fopen(logfilename, log_append ? "a" : "w");
             if (!qemu_logfile) {
@@ -72,6 +73,7 @@ void do_qemu_set_log(int log_flags, bool use_own_buffers)
             }
         } else {
             /* Default to stderr if no log file specified */
+            assert(!is_daemonized());
             qemu_logfile = stderr;
         }
         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
@@ -89,7 +91,8 @@ void do_qemu_set_log(int log_flags, bool use_own_buffers)
             log_append = 1;
         }
     }
-    if (!qemu_loglevel && !is_daemonized() && qemu_logfile) {
+    if (qemu_logfile &&
+        (is_daemonized() ? logfilename == NULL : !qemu_loglevel)) {
         qemu_log_close();
     }
 }
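
The two rewritten conditions in log.c encode one policy: a daemonized QEMU has no usable stderr, so it should open a log only when -D supplied a file name, while a foreground QEMU keeps the old behaviour of logging to stderr whenever any log level is active. A sketch of the decision, with hypothetical helper names that are not part of the patch:

    #include <stdbool.h>

    /* Mirrors the first condition: should a log destination be opened? */
    static bool should_open_log(bool daemonized, bool have_logfile_name,
                                int loglevel)
    {
        return daemonized ? have_logfile_name : loglevel != 0;
    }

    /* Mirrors the second condition: should the current log be closed? */
    static bool should_close_log(bool daemonized, bool have_logfile_name,
                                 int loglevel)
    {
        return daemonized ? !have_logfile_name : loglevel == 0;
    }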