/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
|
|
|
|
|
|
|
|
#ifndef EXEC_OBSOLETE_H
|
|
|
|
#define EXEC_OBSOLETE_H
|
|
|
|
|
|
|
|
#ifndef WANT_EXEC_OBSOLETE
|
|
|
|
#error Do not include exec-obsolete.h
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
|
2011-12-20 14:59:12 +01:00
|
|
|
/*
 * RAM block allocation/deallocation (implemented in exec.c).
 * The returned ram_addr_t is an offset into the global RAM space; the
 * block's lifetime is tied to the owning MemoryRegion @mr.
 */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

struct MemoryRegion;
struct MemoryRegionSection;

/* Install the physical-memory mapping described by @section. */
void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
                                      bool readonly);

/* Coalesced MMIO region registration (batched writes, e.g. via KVM). */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/* Globally enable/disable dirty-page tracking; returns 0 on success. */
int cpu_physical_memory_set_dirty_tracking(int enable);

/*
 * Per-page dirty flag bits stored in ram_list.phys_dirty[].  A page is
 * considered fully dirty when every bit of its byte is set (0xff).
 */
#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
#define MIGRATION_DIRTY_FLAG 0x08
|
|
|
|
|
2012-06-22 13:14:17 +02:00
|
|
|
/* Return the raw dirty-flag byte for the page containing @addr. */
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    ram_addr_t page = addr >> TARGET_PAGE_BITS;

    return ram_list.phys_dirty[page];
}
|
|
|
|
|
2012-06-22 13:14:17 +02:00
|
|
|
/* read dirty bit (return 0 or 1) */
|
|
|
|
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
|
2011-12-21 13:16:38 +01:00
|
|
|
{
|
2012-06-22 13:14:17 +02:00
|
|
|
return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
|
2011-12-21 13:16:38 +01:00
|
|
|
}
|
|
|
|
|
2012-01-22 17:38:21 +01:00
|
|
|
/*
 * Scan the pages covering [start, start + length) and return the union
 * of their dirty bits restricted to @dirty_flags (0 if none are set).
 */
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    ram_addr_t limit = TARGET_PAGE_ALIGN(start + length);
    ram_addr_t page;
    int found = 0;

    for (page = start & TARGET_PAGE_MASK; page < limit;
         page += TARGET_PAGE_SIZE) {
        found |= cpu_physical_memory_get_dirty_flags(page) & dirty_flags;
    }

    return found;
}
|
|
|
|
|
2012-06-22 13:14:17 +02:00
|
|
|
/*
 * OR @dirty_flags into the dirty byte of the page containing @addr,
 * keeping ram_list.dirty_pages (the migration dirty-page count) in sync.
 * Returns the page's updated flag byte.
 */
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    ram_addr_t page = addr >> TARGET_PAGE_BITS;

    /* Count the page only on a 0 -> 1 transition of the migration bit;
     * the check must happen before the flags are merged below. */
    if (dirty_flags & MIGRATION_DIRTY_FLAG) {
        if (!cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                           MIGRATION_DIRTY_FLAG)) {
            ram_list.dirty_pages++;
        }
    }

    return ram_list.phys_dirty[page] |= dirty_flags;
}
|
|
|
|
|
2011-12-21 13:16:38 +01:00
|
|
|
/* Mark the page containing @addr dirty for every client (all flag bits). */
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}
|
|
|
|
|
2012-06-22 13:14:17 +02:00
|
|
|
/*
 * Clear @dirty_flags in the dirty byte of the page containing @addr,
 * keeping ram_list.dirty_pages (the migration dirty-page count) in sync.
 * Returns the page's updated flag byte.
 */
static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                        int dirty_flags)
{
    ram_addr_t page = addr >> TARGET_PAGE_BITS;

    /* Count down only on a 1 -> 0 transition of the migration bit;
     * the check must happen before the flags are cleared below. */
    if (dirty_flags & MIGRATION_DIRTY_FLAG) {
        if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                          MIGRATION_DIRTY_FLAG)) {
            ram_list.dirty_pages--;
        }
    }

    return ram_list.phys_dirty[page] &= ~dirty_flags;
}
|
|
|
|
|
2011-10-16 18:04:59 +02:00
|
|
|
/* Set @dirty_flags on every page covering [start, start + length). */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       int dirty_flags)
{
    ram_addr_t limit = TARGET_PAGE_ALIGN(start + length);
    ram_addr_t page;

    for (page = start & TARGET_PAGE_MASK; page < limit;
         page += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(page, dirty_flags);
    }
}
|
|
|
|
|
2011-12-21 13:16:38 +01:00
|
|
|
/* Clear @dirty_flags on every page covering [start, start + length). */
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t limit = TARGET_PAGE_ALIGN(start + length);
    ram_addr_t page;

    for (page = start & TARGET_PAGE_MASK; page < limit;
         page += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(page, dirty_flags);
    }
}
|
|
|
|
|
|
|
|
/*
 * Clear @dirty_flags for all pages in [start, end) and flush affected
 * TLB/translation state; implemented in exec.c.
 */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

/* IORange callbacks used for ioport-backed memory regions (memory.c). */
extern const IORangeOps memory_region_iorange_ops;
|
|
|
|
|
2011-12-15 14:25:22 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif
|