Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* KVM error improvement from Laurent
* CONFIG_PARALLEL fix from Mirek
* Atomic/optimized dirty bitmap access from myself and Stefan
* BUILD_DIR convenience/bugfix from Peter C
* Memory leak fix from Shannon
* SMM improvements (though still TCG only) from myself and Gerd, acked by mst

# gpg: Signature made Fri Jun  5 18:45:20 2015 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (62 commits)
  update Linux headers from kvm/next
  atomics: add explicit compiler fence in __atomic memory barriers
  ich9: implement SMI_LOCK
  q35: implement TSEG
  q35: add test for SMRAM.D_LCK
  q35: implement SMRAM.D_LCK
  q35: add config space wmask for SMRAM and ESMRAMC
  q35: fix ESMRAMC default
  q35: implement high SMRAM
  hw/i386: remove smram_update
  target-i386: use memory API to implement SMRAM
  hw/i386: add a separate region that tracks the SMRAME bit
  target-i386: create a separate AddressSpace for each CPU
  vl: run "late" notifiers immediately
  qom: add object_property_add_const_link
  vl: allow full-blown QemuOpts syntax for -global
  pflash_cfi01: add secure property
  pflash_cfi01: change to new-style MMIO accessors
  pflash_cfi01: change big-endian property to BIT type
  target-i386: wake up processors that receive an SMI
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit ee09f84e6b
Peter Maydell, 2015-06-08 15:57:41 +01:00
75 changed files with 1548 additions and 1043 deletions
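
For context on the dirty-bitmap entries above, here is a minimal sketch of lock-free dirty-page marking in portable C11. This is not QEMU's code: the names page_dirty_bitmap, mark_page_dirty and collect_dirty_word are invented for illustration, while QEMU's real helpers (bitmap_set_atomic, bitmap_test_and_clear_atomic, atomic_xchg) appear in the diffs below.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_WORD (sizeof(unsigned long) * 8)

static _Atomic unsigned long page_dirty_bitmap[1024];

/* Set one page's dirty bit without taking a lock; report whether the
 * page was already dirty so callers can keep a dirty-page count. */
static bool mark_page_dirty(size_t page)
{
    unsigned long mask = 1UL << (page % BITS_PER_WORD);
    unsigned long old =
        atomic_fetch_or(&page_dirty_bitmap[page / BITS_PER_WORD], mask);
    return old & mask;
}

/* Fetch-and-clear a whole bitmap word, mirroring what the migration
 * sync path below does with atomic_xchg(&src[k], 0). */
static unsigned long collect_dirty_word(size_t word)
{
    return atomic_exchange(&page_dirty_bitmap[word], 0);
}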


@@ -1,5 +1,7 @@
# -*- Mode: makefile -*-
BUILD_DIR?=$(CURDIR)/..
include ../config-host.mak
include config-target.mak
include config-devices.mak


@@ -609,52 +609,10 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
return (next - base) << TARGET_PAGE_BITS;
}
static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
bool ret;
int nr = addr >> TARGET_PAGE_BITS;
ret = test_and_set_bit(nr, migration_bitmap);
if (!ret) {
migration_dirty_pages++;
}
return ret;
}
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
ram_addr_t addr;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
for (k = page; k < page + nr; k++) {
if (src[k]) {
unsigned long new_dirty;
new_dirty = ~migration_bitmap[k];
migration_bitmap[k] |= src[k];
new_dirty &= src[k];
migration_dirty_pages += ctpopl(new_dirty);
src[k] = 0;
}
}
} else {
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_get_dirty(start + addr,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
cpu_physical_memory_reset_dirty(start + addr,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION);
migration_bitmap_set_dirty(start + addr);
}
}
}
migration_dirty_pages +=
cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, start, length);
}


@@ -108,10 +108,6 @@ void cpu_list_unlock(void)
/***********************************************************/
/* CPUX86 core interface */
void cpu_smm_update(CPUX86State *env)
{
}
uint64_t cpu_get_tsc(CPUX86State *env)
{
return cpu_get_real_ticks();

cpus.c

@@ -105,6 +105,7 @@ static bool all_cpu_threads_idle(void)
/* Protected by TimersState seqlock */
static bool icount_sleep = true;
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
@@ -393,15 +394,18 @@ void qemu_clock_warp(QEMUClockType type)
return;
}
/*
* If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
* This ensures that the deadline for the timer is computed correctly below.
* This also makes sure that the insn counter is synchronized before the
* CPU starts running, in case the CPU is woken by an event other than
* the earliest QEMU_CLOCK_VIRTUAL timer.
*/
icount_warp_rt(NULL);
timer_del(icount_warp_timer);
if (icount_sleep) {
/*
* If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
* This ensures that the deadline for the timer is computed correctly
* below.
* This also makes sure that the insn counter is synchronized before
* the CPU starts running, in case the CPU is woken by an event other
* than the earliest QEMU_CLOCK_VIRTUAL timer.
*/
icount_warp_rt(NULL);
timer_del(icount_warp_timer);
}
if (!all_cpu_threads_idle()) {
return;
}
@@ -415,6 +419,11 @@ void qemu_clock_warp(QEMUClockType type)
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
if (deadline < 0) {
static bool notified;
if (!icount_sleep && !notified) {
error_report("WARNING: icount sleep disabled and no active timers");
notified = true;
}
return;
}
@@ -425,23 +434,35 @@ void qemu_clock_warp(QEMUClockType type)
* interrupt to wake it up, but the interrupt never comes because
* the vCPU isn't running any insns and thus doesn't advance the
* QEMU_CLOCK_VIRTUAL.
*
* An extreme solution for this problem would be to never let VCPUs
* sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
* timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
* event. Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
* after some "real" time, (related to the time left until the next
* event) has passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
* This avoids that the warps are visible externally; for example,
* you will not be sending network packets continuously instead of
* every 100ms.
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
vm_clock_warp_start = clock;
if (!icount_sleep) {
/*
* We never let VCPUs sleep in no sleep icount mode.
* If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
* to the next QEMU_CLOCK_VIRTUAL event and notify it.
* It is useful when we want a deterministic execution time,
* isolated from host latencies.
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock);
timers_state.qemu_icount_bias += deadline;
seqlock_write_unlock(&timers_state.vm_clock_seqlock);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
} else {
/*
* We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
* "real" time, (related to the time left until the next event) has
* passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
* This avoids that the warps are visible externally; for example,
* you will not be sending network packets continuously instead of
* every 100ms.
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
vm_clock_warp_start = clock;
}
seqlock_write_unlock(&timers_state.vm_clock_seqlock);
timer_mod_anticipate(icount_warp_timer, clock + deadline);
}
seqlock_write_unlock(&timers_state.vm_clock_seqlock);
timer_mod_anticipate(icount_warp_timer, clock + deadline);
} else if (deadline == 0) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
@@ -504,9 +525,18 @@ void configure_icount(QemuOpts *opts, Error **errp)
}
return;
}
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
icount_warp_rt, NULL);
}
icount_align_option = qemu_opt_get_bool(opts, "align", false);
icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
icount_warp_rt, NULL);
if (icount_align_option && !icount_sleep) {
error_setg(errp, "align=on and sleep=no are incompatible");
}
if (strcmp(option, "auto") != 0) {
errno = 0;
icount_time_shift = strtol(option, &rem_str, 0);
@@ -517,6 +547,8 @@ void configure_icount(QemuOpts *opts, Error **errp)
return;
} else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible");
} else if (!icount_sleep) {
error_setg(errp, "shift=auto and sleep=no are incompatible");
}
use_icount = 2;
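
A hedged usage sketch for the icount changes above (option spellings assume the standard QemuOpts on/off booleans; the machine type is arbitrary):

# Deterministic virtual time: 2^7 ns per executed instruction, and vCPUs
# never sleep waiting for QEMU_CLOCK_VIRTUAL timers.
qemu-system-x86_64 -icount shift=7,sleep=off ...

# Auto-tuned shift with host/guest clock alignment; per the checks above,
# this cannot be combined with sleep=off.
qemu-system-x86_64 -icount shift=auto,align=on ...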


@@ -125,14 +125,13 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
cpu_physical_memory_reset_dirty(ram_addr, TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
tested for self modifying code */
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
target_ulong vaddr)
void tlb_unprotect_code(ram_addr_t ram_addr)
{
cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

exec.c

@@ -59,8 +59,6 @@
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
* are protected by the ramlist lock.
*/
@@ -173,17 +171,22 @@ static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
}
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
unsigned i;
uint32_t ret;
PhysPageEntry e;
PhysPageEntry *p;
ret = map->nodes_nb++;
p = map->nodes[ret];
assert(ret != PHYS_MAP_NODE_NIL);
assert(ret != map->nodes_nb_alloc);
e.skip = leaf ? 0 : 1;
e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
for (i = 0; i < P_L2_SIZE; ++i) {
map->nodes[ret][i].skip = 1;
map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
memcpy(&p[i], &e, sizeof(e));
}
return ret;
}
@@ -193,21 +196,12 @@ static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
int level)
{
PhysPageEntry *p;
int i;
hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
lp->ptr = phys_map_node_alloc(map);
p = map->nodes[lp->ptr];
if (level == 0) {
for (i = 0; i < P_L2_SIZE; i++) {
p[i].skip = 0;
p[i].ptr = PHYS_SECTION_UNASSIGNED;
}
}
} else {
p = map->nodes[lp->ptr];
lp->ptr = phys_map_node_alloc(map, level == 0);
}
p = map->nodes[lp->ptr];
lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
while (*nb && lp < &p[P_L2_SIZE]) {
@@ -858,21 +852,27 @@ static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
}
/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
unsigned client)
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
if (length == 0)
return;
cpu_physical_memory_clear_dirty_range_type(start, length, client);
unsigned long end, page;
bool dirty;
if (tcg_enabled()) {
if (length == 0) {
return false;
}
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
page, end - page);
if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
}
static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
in_migration = enable;
return dirty;
}
/* Called from RCU critical section */
@@ -1362,7 +1362,8 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
block->used_length = newsize;
cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
DIRTY_CLIENTS_ALL);
memory_region_set_size(block->mr, newsize);
if (block->resized) {
block->resized(block->idstr, newsize, block->host);
@@ -1436,7 +1437,8 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
}
}
cpu_physical_memory_set_dirty_range(new_block->offset,
new_block->used_length);
new_block->used_length,
DIRTY_CLIENTS_ALL);
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
@@ -1824,7 +1826,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
default:
abort();
}
cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
/* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
cpu_physical_memory_set_dirty_range(ram_addr, size,
DIRTY_CLIENTS_NOCODE);
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
@@ -2165,22 +2171,6 @@ static void tcg_commit(MemoryListener *listener)
}
}
static void core_log_global_start(MemoryListener *listener)
{
cpu_physical_memory_set_dirty_tracking(true);
}
static void core_log_global_stop(MemoryListener *listener)
{
cpu_physical_memory_set_dirty_tracking(false);
}
static MemoryListener core_memory_listener = {
.log_global_start = core_log_global_start,
.log_global_stop = core_log_global_stop,
.priority = 1,
};
void address_space_init_dispatch(AddressSpace *as)
{
as->dispatch = NULL;
@@ -2220,8 +2210,6 @@ static void memory_map_init(void)
memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
65536);
address_space_init(&address_space_io, system_io, "I/O");
memory_listener_register(&core_memory_listener, &address_space_memory);
}
MemoryRegion *get_system_memory(void)
@@ -2279,14 +2267,23 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
#else
static void invalidate_and_set_dirty(hwaddr addr,
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr length)
{
if (cpu_physical_memory_range_includes_clean(addr, length)) {
tb_invalidate_phys_range(addr, addr + length, 0);
cpu_physical_memory_set_dirty_range_nocode(addr, length);
uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
/* No early return if dirty_log_mask is or becomes 0, because
* cpu_physical_memory_set_dirty_range will still call
* xen_modified_memory.
*/
if (dirty_log_mask) {
dirty_log_mask =
cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
xen_modified_memory(addr, length);
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
tb_invalidate_phys_range(addr, addr + length);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
@@ -2371,7 +2368,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
invalidate_and_set_dirty(addr1, l);
invalidate_and_set_dirty(mr, addr1, l);
}
} else {
if (!memory_access_is_direct(mr, is_write)) {
@@ -2468,7 +2465,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
switch (type) {
case WRITE_DATA:
memcpy(ptr, buf, l);
invalidate_and_set_dirty(addr1, l);
invalidate_and_set_dirty(mr, addr1, l);
break;
case FLUSH_CACHE:
flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
@@ -2693,7 +2690,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
mr = qemu_ram_addr_from_host(buffer, &addr1);
assert(mr != NULL);
if (is_write) {
invalidate_and_set_dirty(addr1, access_len);
invalidate_and_set_dirty(mr, addr1, access_len);
}
if (xen_enabled()) {
xen_invalidate_map_cache_entry(buffer);
@@ -3022,6 +3019,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
hwaddr l = 4;
hwaddr addr1;
MemTxResult r;
uint8_t dirty_log_mask;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
@@ -3033,14 +3031,9 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
ptr = qemu_get_ram_ptr(addr1);
stl_p(ptr, val);
if (unlikely(in_migration)) {
if (cpu_physical_memory_is_clean(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
}
}
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
r = MEMTX_OK;
}
if (result) {
@@ -3096,7 +3089,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
stl_p(ptr, val);
break;
}
invalidate_and_set_dirty(addr1, 4);
invalidate_and_set_dirty(mr, addr1, 4);
r = MEMTX_OK;
}
if (result) {
@@ -3200,7 +3193,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
stw_p(ptr, val);
break;
}
invalidate_and_set_dirty(addr1, 2);
invalidate_and_set_dirty(mr, addr1, 2);
r = MEMTX_OK;
}
if (result) {


@@ -94,7 +94,8 @@ static void ich9_smi_writel(void *opaque, hwaddr addr, uint64_t val,
ICH9LPCPMRegs *pm = opaque;
switch (addr) {
case 0:
pm->smi_en = val;
pm->smi_en &= ~pm->smi_en_wmask;
pm->smi_en |= (val & pm->smi_en_wmask);
break;
}
}
@@ -198,6 +199,7 @@ static void pm_reset(void *opaque)
* support SMM mode. */
pm->smi_en |= ICH9_PMIO_SMI_EN_APMC_EN;
}
pm->smi_en_wmask = ~0;
acpi_update_sci(&pm->acpi_regs, pm->irq);
}


@@ -525,7 +525,7 @@ static pflash_t *ve_pflash_cfi01_register(hwaddr base, const char *name,
qdev_prop_set_uint64(dev, "sector-length", VEXPRESS_FLASH_SECT_SIZE);
qdev_prop_set_uint8(dev, "width", 4);
qdev_prop_set_uint8(dev, "device-width", 2);
qdev_prop_set_uint8(dev, "big-endian", 0);
qdev_prop_set_bit(dev, "big-endian", false);
qdev_prop_set_uint16(dev, "id0", 0x89);
qdev_prop_set_uint16(dev, "id1", 0x18);
qdev_prop_set_uint16(dev, "id2", 0x00);


@@ -555,7 +555,7 @@ static void create_one_flash(const char *name, hwaddr flashbase,
qdev_prop_set_uint64(dev, "sector-length", sectorlength);
qdev_prop_set_uint8(dev, "width", 4);
qdev_prop_set_uint8(dev, "device-width", 2);
qdev_prop_set_uint8(dev, "big-endian", 0);
qdev_prop_set_bit(dev, "big-endian", false);
qdev_prop_set_uint16(dev, "id0", 0x89);
qdev_prop_set_uint16(dev, "id1", 0x18);
qdev_prop_set_uint16(dev, "id2", 0x00);


@@ -64,6 +64,9 @@ do { \
#define TYPE_CFI_PFLASH01 "cfi.pflash01"
#define CFI_PFLASH01(obj) OBJECT_CHECK(pflash_t, (obj), TYPE_CFI_PFLASH01)
#define PFLASH_BE 0
#define PFLASH_SECURE 1
struct pflash_t {
/*< private >*/
SysBusDevice parent_obj;
@@ -75,7 +78,7 @@ struct pflash_t {
uint8_t bank_width;
uint8_t device_width; /* If 0, device width not specified. */
uint8_t max_device_width; /* max device width in bytes */
uint8_t be;
uint32_t features;
uint8_t wcycle; /* if 0, the flash is read normally */
int ro;
uint8_t cmd;
@@ -235,12 +238,57 @@ static uint32_t pflash_devid_query(pflash_t *pfl, hwaddr offset)
return resp;
}
static uint32_t pflash_data_read(pflash_t *pfl, hwaddr offset,
int width, int be)
{
uint8_t *p;
uint32_t ret;
p = pfl->storage;
switch (width) {
case 1:
ret = p[offset];
DPRINTF("%s: data offset " TARGET_FMT_plx " %02x\n",
__func__, offset, ret);
break;
case 2:
if (be) {
ret = p[offset] << 8;
ret |= p[offset + 1];
} else {
ret = p[offset];
ret |= p[offset + 1] << 8;
}
DPRINTF("%s: data offset " TARGET_FMT_plx " %04x\n",
__func__, offset, ret);
break;
case 4:
if (be) {
ret = p[offset] << 24;
ret |= p[offset + 1] << 16;
ret |= p[offset + 2] << 8;
ret |= p[offset + 3];
} else {
ret = p[offset];
ret |= p[offset + 1] << 8;
ret |= p[offset + 2] << 16;
ret |= p[offset + 3] << 24;
}
DPRINTF("%s: data offset " TARGET_FMT_plx " %08x\n",
__func__, offset, ret);
break;
default:
DPRINTF("BUG in %s\n", __func__);
abort();
}
return ret;
}
static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
int width, int be)
{
hwaddr boff;
uint32_t ret;
uint8_t *p;
ret = -1;
@@ -257,43 +305,7 @@ static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
/* fall through to read code */
case 0x00:
/* Flash area read */
p = pfl->storage;
switch (width) {
case 1:
ret = p[offset];
DPRINTF("%s: data offset " TARGET_FMT_plx " %02x\n",
__func__, offset, ret);
break;
case 2:
if (be) {
ret = p[offset] << 8;
ret |= p[offset + 1];
} else {
ret = p[offset];
ret |= p[offset + 1] << 8;
}
DPRINTF("%s: data offset " TARGET_FMT_plx " %04x\n",
__func__, offset, ret);
break;
case 4:
if (be) {
ret = p[offset] << 24;
ret |= p[offset + 1] << 16;
ret |= p[offset + 2] << 8;
ret |= p[offset + 3];
} else {
ret = p[offset];
ret |= p[offset + 1] << 8;
ret |= p[offset + 2] << 16;
ret |= p[offset + 3] << 24;
}
DPRINTF("%s: data offset " TARGET_FMT_plx " %08x\n",
__func__, offset, ret);
break;
default:
DPRINTF("BUG in %s\n", __func__);
}
ret = pflash_data_read(pfl, offset, width, be);
break;
case 0x10: /* Single byte program */
case 0x20: /* Block erase */
@@ -648,101 +660,37 @@ static void pflash_write(pflash_t *pfl, hwaddr offset,
}
static uint32_t pflash_readb_be(void *opaque, hwaddr addr)
{
return pflash_read(opaque, addr, 1, 1);
}
static uint32_t pflash_readb_le(void *opaque, hwaddr addr)
{
return pflash_read(opaque, addr, 1, 0);
}
static uint32_t pflash_readw_be(void *opaque, hwaddr addr)
static MemTxResult pflash_mem_read_with_attrs(void *opaque, hwaddr addr, uint64_t *value,
unsigned len, MemTxAttrs attrs)
{
pflash_t *pfl = opaque;
bool be = !!(pfl->features & (1 << PFLASH_BE));
return pflash_read(pfl, addr, 2, 1);
if ((pfl->features & (1 << PFLASH_SECURE)) && !attrs.secure) {
*value = pflash_data_read(opaque, addr, len, be);
} else {
*value = pflash_read(opaque, addr, len, be);
}
return MEMTX_OK;
}
static uint32_t pflash_readw_le(void *opaque, hwaddr addr)
static MemTxResult pflash_mem_write_with_attrs(void *opaque, hwaddr addr, uint64_t value,
unsigned len, MemTxAttrs attrs)
{
pflash_t *pfl = opaque;
bool be = !!(pfl->features & (1 << PFLASH_BE));
return pflash_read(pfl, addr, 2, 0);
if ((pfl->features & (1 << PFLASH_SECURE)) && !attrs.secure) {
return MEMTX_ERROR;
} else {
pflash_write(opaque, addr, value, len, be);
return MEMTX_OK;
}
}
static uint32_t pflash_readl_be(void *opaque, hwaddr addr)
{
pflash_t *pfl = opaque;
return pflash_read(pfl, addr, 4, 1);
}
static uint32_t pflash_readl_le(void *opaque, hwaddr addr)
{
pflash_t *pfl = opaque;
return pflash_read(pfl, addr, 4, 0);
}
static void pflash_writeb_be(void *opaque, hwaddr addr,
uint32_t value)
{
pflash_write(opaque, addr, value, 1, 1);
}
static void pflash_writeb_le(void *opaque, hwaddr addr,
uint32_t value)
{
pflash_write(opaque, addr, value, 1, 0);
}
static void pflash_writew_be(void *opaque, hwaddr addr,
uint32_t value)
{
pflash_t *pfl = opaque;
pflash_write(pfl, addr, value, 2, 1);
}
static void pflash_writew_le(void *opaque, hwaddr addr,
uint32_t value)
{
pflash_t *pfl = opaque;
pflash_write(pfl, addr, value, 2, 0);
}
static void pflash_writel_be(void *opaque, hwaddr addr,
uint32_t value)
{
pflash_t *pfl = opaque;
pflash_write(pfl, addr, value, 4, 1);
}
static void pflash_writel_le(void *opaque, hwaddr addr,
uint32_t value)
{
pflash_t *pfl = opaque;
pflash_write(pfl, addr, value, 4, 0);
}
static const MemoryRegionOps pflash_cfi01_ops_be = {
.old_mmio = {
.read = { pflash_readb_be, pflash_readw_be, pflash_readl_be, },
.write = { pflash_writeb_be, pflash_writew_be, pflash_writel_be, },
},
.endianness = DEVICE_NATIVE_ENDIAN,
};
static const MemoryRegionOps pflash_cfi01_ops_le = {
.old_mmio = {
.read = { pflash_readb_le, pflash_readw_le, pflash_readl_le, },
.write = { pflash_writeb_le, pflash_writew_le, pflash_writel_le, },
},
static const MemoryRegionOps pflash_cfi01_ops = {
.read_with_attrs = pflash_mem_read_with_attrs,
.write_with_attrs = pflash_mem_write_with_attrs,
.endianness = DEVICE_NATIVE_ENDIAN,
};
@@ -773,7 +721,8 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
memory_region_init_rom_device(
&pfl->mem, OBJECT(dev),
pfl->be ? &pflash_cfi01_ops_be : &pflash_cfi01_ops_le, pfl,
&pflash_cfi01_ops,
pfl,
pfl->name, total_len, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -925,7 +874,8 @@ static Property pflash_cfi01_properties[] = {
DEFINE_PROP_UINT8("width", struct pflash_t, bank_width, 0),
DEFINE_PROP_UINT8("device-width", struct pflash_t, device_width, 0),
DEFINE_PROP_UINT8("max-device-width", struct pflash_t, max_device_width, 0),
DEFINE_PROP_UINT8("big-endian", struct pflash_t, be, 0),
DEFINE_PROP_BIT("big-endian", struct pflash_t, features, PFLASH_BE, 0),
DEFINE_PROP_BIT("secure", struct pflash_t, features, PFLASH_SECURE, 0),
DEFINE_PROP_UINT16("id0", struct pflash_t, ident0, 0),
DEFINE_PROP_UINT16("id1", struct pflash_t, ident1, 0),
DEFINE_PROP_UINT16("id2", struct pflash_t, ident2, 0),
@@ -975,7 +925,7 @@ pflash_t *pflash_cfi01_register(hwaddr base,
qdev_prop_set_uint32(dev, "num-blocks", nb_blocs);
qdev_prop_set_uint64(dev, "sector-length", sector_len);
qdev_prop_set_uint8(dev, "width", bank_width);
qdev_prop_set_uint8(dev, "big-endian", !!be);
qdev_prop_set_bit(dev, "big-endian", !!be);
qdev_prop_set_uint16(dev, "id0", id0);
qdev_prop_set_uint16(dev, "id1", id1);
qdev_prop_set_uint16(dev, "id2", id2);


@@ -641,28 +641,3 @@ static void parallel_register_types(void)
}
type_init(parallel_register_types)
static void parallel_init(ISABus *bus, int index, CharDriverState *chr)
{
DeviceState *dev;
ISADevice *isadev;
isadev = isa_create(bus, "isa-parallel");
dev = DEVICE(isadev);
qdev_prop_set_uint32(dev, "index", index);
qdev_prop_set_chr(dev, "chardev", chr);
qdev_init_nofail(dev);
}
void parallel_hds_isa_init(ISABus *bus, int n)
{
int i;
assert(n <= MAX_PARALLEL_PORTS);
for (i = 0; i < n; i++) {
if (parallel_hds[i]) {
parallel_init(bus, i, parallel_hds[i]);
}
}
}


@@ -106,6 +106,7 @@ static void cg3_update_display(void *opaque)
pix = memory_region_get_ram_ptr(&s->vram_mem);
data = (uint32_t *)surface_data(surface);
memory_region_sync_dirty_bitmap(&s->vram_mem);
for (y = 0; y < height; y++) {
int update = s->full_update;
@@ -309,6 +310,7 @@ static void cg3_realizefn(DeviceState *dev, Error **errp)
memory_region_init_ram(&s->vram_mem, NULL, "cg3.vram", s->vram_size,
&error_abort);
memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vmstate_register_ram_global(&s->vram_mem);
sysbus_init_mmio(sbd, &s->vram_mem);


@@ -1109,6 +1109,12 @@ static inline int fimd_get_buffer_id(Exynos4210fimdWindow *w)
}
}
static void exynos4210_fimd_invalidate(void *opaque)
{
Exynos4210fimdState *s = (Exynos4210fimdState *)opaque;
s->invalidate = true;
}
/* Updates specified window's MemorySection based on values of WINCON,
* VIDOSDA, VIDOSDB, VIDWADDx and SHADOWCON registers */
static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win)
@@ -1136,7 +1142,11 @@ static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win)
/* TODO: add .exit and unref the region there. Not needed yet since sysbus
* does not support hot-unplug.
*/
memory_region_unref(w->mem_section.mr);
if (w->mem_section.mr) {
memory_region_set_log(w->mem_section.mr, false, DIRTY_MEMORY_VGA);
memory_region_unref(w->mem_section.mr);
}
w->mem_section = memory_region_find(sysbus_address_space(sbd),
fb_start_addr, w->fb_len);
assert(w->mem_section.mr);
@@ -1162,6 +1172,8 @@ static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win)
cpu_physical_memory_unmap(w->host_fb_addr, fb_mapped_len, 0, 0);
goto error_return;
}
memory_region_set_log(w->mem_section.mr, true, DIRTY_MEMORY_VGA);
exynos4210_fimd_invalidate(s);
return;
error_return:
@@ -1224,12 +1236,6 @@ static void exynos4210_fimd_update_irq(Exynos4210fimdState *s)
}
}
static void exynos4210_fimd_invalidate(void *opaque)
{
Exynos4210fimdState *s = (Exynos4210fimdState *)opaque;
s->invalidate = true;
}
static void exynos4210_update_resolution(Exynos4210fimdState *s)
{
DisplaySurface *surface = qemu_console_surface(s->console);


@@ -63,6 +63,10 @@ void framebuffer_update_display(
assert(mem_section.offset_within_address_space == base);
memory_region_sync_dirty_bitmap(mem);
if (!memory_region_is_logging(mem, DIRTY_MEMORY_VGA)) {
invalidate = true;
}
src_base = cpu_physical_memory_map(base, &src_len, 0);
/* If we can't map the framebuffer then bail. We could try harder,
but it's not really worth it as dirty flag tracking will probably


@@ -260,6 +260,7 @@ static void g364fb_update_display(void *opaque)
qemu_console_resize(s->con, s->width, s->height);
}
memory_region_sync_dirty_bitmap(&s->mem_vram);
if (s->ctla & CTLA_FORCE_BLANK) {
g364fb_draw_blank(s);
} else if (s->depth == 8) {
@@ -489,7 +490,7 @@ static void g364fb_init(DeviceState *dev, G364State *s)
memory_region_init_ram_ptr(&s->mem_vram, NULL, "vram",
s->vram_size, s->vram);
vmstate_register_ram(&s->mem_vram, dev);
memory_region_set_coalescing(&s->mem_vram);
memory_region_set_log(&s->mem_vram, true, DIRTY_MEMORY_VGA);
}
#define TYPE_G364 "sysbus-g364"


@@ -1322,6 +1322,7 @@ static void sm501_draw_crt(SM501State * s)
}
/* draw each line according to conditions */
memory_region_sync_dirty_bitmap(&s->local_mem_region);
for (y = 0; y < height; y++) {
int update_hwc = draw_hwc_line ? within_hwc_y_range(s, y, 1) : 0;
int update = full_update || update_hwc;
@@ -1412,6 +1413,7 @@ void sm501_init(MemoryRegion *address_space_mem, uint32_t base,
memory_region_init_ram(&s->local_mem_region, NULL, "sm501.local",
local_mem_bytes, &error_abort);
vmstate_register_ram_global(&s->local_mem_region);
memory_region_set_log(&s->local_mem_region, true, DIRTY_MEMORY_VGA);
s->local_mem = memory_region_get_ram_ptr(&s->local_mem_region);
memory_region_add_subregion(address_space_mem, base, &s->local_mem_region);


@@ -353,6 +353,7 @@ static void tcx_update_display(void *opaque)
return;
}
memory_region_sync_dirty_bitmap(&ts->vram_mem);
for (y = 0; y < ts->height; page += TARGET_PAGE_SIZE) {
if (memory_region_get_dirty(&ts->vram_mem, page, TARGET_PAGE_SIZE,
DIRTY_MEMORY_VGA)) {
@@ -446,6 +447,7 @@ static void tcx24_update_display(void *opaque)
dd = surface_stride(surface);
ds = 1024;
memory_region_sync_dirty_bitmap(&ts->vram_mem);
for (y = 0; y < ts->height; page += TARGET_PAGE_SIZE,
page24 += TARGET_PAGE_SIZE, cpage += TARGET_PAGE_SIZE) {
if (tcx24_check_dirty(ts, page, page24, cpage)) {
@@ -1006,6 +1008,7 @@ static void tcx_realizefn(DeviceState *dev, Error **errp)
memory_region_init_ram(&s->vram_mem, OBJECT(s), "tcx.vram",
s->vram_size * (1 + 4 + 4), &error_abort);
vmstate_register_ram_global(&s->vram_mem);
memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vram_base = memory_region_get_ram_ptr(&s->vram_mem);
/* 10/ROM : FCode ROM */


@@ -1124,7 +1124,7 @@ static void vmsvga_update_display(void *opaque)
* Is it more efficient to look at vram VGA-dirty bits or wait
* for the driver to issue SVGA_CMD_UPDATE?
*/
if (memory_region_is_logging(&s->vga.vram)) {
if (memory_region_is_logging(&s->vga.vram, DIRTY_MEMORY_VGA)) {
vga_sync_dirty_bitmap(&s->vga);
dirty = memory_region_get_dirty(&s->vga.vram, 0,
surface_stride(surface) * surface_height(surface),


@@ -164,27 +164,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_ticks();
}
/* SMM support */
static cpu_set_smm_t smm_set;
static void *smm_arg;
void cpu_smm_register(cpu_set_smm_t callback, void *arg)
{
assert(smm_set == NULL);
assert(smm_arg == NULL);
smm_set = callback;
smm_arg = arg;
}
void cpu_smm_update(CPUX86State *env)
{
if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == first_cpu) {
smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg);
}
}
/* IRQ handling */
int cpu_get_pic_interrupt(CPUX86State *env)
{


@@ -21,6 +21,7 @@
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
#include "hw/isa/isa.h"
#include "hw/i386/pc.h"
static ISABus *isabus;
@@ -267,3 +268,28 @@ MemoryRegion *isa_address_space_io(ISADevice *dev)
}
type_init(isabus_register_types)
static void parallel_init(ISABus *bus, int index, CharDriverState *chr)
{
DeviceState *dev;
ISADevice *isadev;
isadev = isa_create(bus, "isa-parallel");
dev = DEVICE(isadev);
qdev_prop_set_uint32(dev, "index", index);
qdev_prop_set_chr(dev, "chardev", chr);
qdev_init_nofail(dev);
}
void parallel_hds_isa_init(ISABus *bus, int n)
{
int i;
assert(n <= MAX_PARALLEL_PORTS);
for (i = 0; i < n; i++) {
if (parallel_hds[i]) {
parallel_init(bus, i, parallel_hds[i]);
}
}
}


@@ -407,12 +407,28 @@ static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rbca_old)
}
}
/* config:GEN_PMCON* */
static void
ich9_lpc_pmcon_update(ICH9LPCState *lpc)
{
uint16_t gen_pmcon_1 = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1);
uint16_t wmask;
if (gen_pmcon_1 & ICH9_LPC_GEN_PMCON_1_SMI_LOCK) {
wmask = pci_get_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1);
wmask &= ~ICH9_LPC_GEN_PMCON_1_SMI_LOCK;
pci_set_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1, wmask);
lpc->pm.smi_en_wmask &= ~1;
}
}
static int ich9_lpc_post_load(void *opaque, int version_id)
{
ICH9LPCState *lpc = opaque;
ich9_lpc_pmbase_update(lpc);
ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RBCA_EN */);
ich9_lpc_pmcon_update(lpc);
return 0;
}
@@ -435,6 +451,9 @@ static void ich9_lpc_config_write(PCIDevice *d,
if (ranges_overlap(addr, len, ICH9_LPC_PIRQE_ROUT, 4)) {
pci_bus_fire_intx_routing_notifier(lpc->d.bus);
}
if (ranges_overlap(addr, len, ICH9_LPC_GEN_PMCON_1, 8)) {
ich9_lpc_pmcon_update(lpc);
}
}
static void ich9_lpc_reset(DeviceState *qdev)


@@ -31,26 +31,6 @@
#include "sysemu/sysemu.h"
#include "hw/pci-host/pam.h"
void smram_update(MemoryRegion *smram_region, uint8_t smram,
uint8_t smm_enabled)
{
bool smram_enabled;
smram_enabled = ((smm_enabled && (smram & SMRAM_G_SMRAME)) ||
(smram & SMRAM_D_OPEN));
memory_region_set_enabled(smram_region, !smram_enabled);
}
void smram_set_smm(uint8_t *host_smm_enabled, int smm, uint8_t smram,
MemoryRegion *smram_region)
{
uint8_t smm_enabled = (smm != 0);
if (*host_smm_enabled != smm_enabled) {
*host_smm_enabled = smm_enabled;
smram_update(smram_region, smram, *host_smm_enabled);
}
}
void init_pam(DeviceState *dev, MemoryRegion *ram_memory,
MemoryRegion *system_memory, MemoryRegion *pci_address_space,
PAMMemoryRegion *mem, uint32_t start, uint32_t size)


@@ -105,7 +105,7 @@ struct PCII440FXState {
MemoryRegion *ram_memory;
PAMMemoryRegion pam_regions[13];
MemoryRegion smram_region;
uint8_t smm_enabled;
MemoryRegion smram, low_smram;
};
@@ -138,18 +138,10 @@ static void i440fx_update_memory_mappings(PCII440FXState *d)
pam_update(&d->pam_regions[i], i,
pd->config[I440FX_PAM + ((i + 1) / 2)]);
}
smram_update(&d->smram_region, pd->config[I440FX_SMRAM], d->smm_enabled);
memory_region_transaction_commit();
}
static void i440fx_set_smm(int val, void *arg)
{
PCII440FXState *d = arg;
PCIDevice *pd = PCI_DEVICE(d);
memory_region_transaction_begin();
smram_set_smm(&d->smm_enabled, val, pd->config[I440FX_SMRAM],
&d->smram_region);
memory_region_set_enabled(&d->smram_region,
!(pd->config[I440FX_SMRAM] & SMRAM_D_OPEN));
memory_region_set_enabled(&d->smram,
pd->config[I440FX_SMRAM] & SMRAM_G_SMRAME);
memory_region_transaction_commit();
}
@@ -172,12 +164,13 @@ static int i440fx_load_old(QEMUFile* f, void *opaque, int version_id)
PCII440FXState *d = opaque;
PCIDevice *pd = PCI_DEVICE(d);
int ret, i;
uint8_t smm_enabled;
ret = pci_device_load(pd, f);
if (ret < 0)
return ret;
i440fx_update_memory_mappings(d);
qemu_get_8s(f, &d->smm_enabled);
qemu_get_8s(f, &smm_enabled);
if (version_id == 2) {
for (i = 0; i < PIIX_NUM_PIRQS; i++) {
@@ -205,7 +198,10 @@ static const VMStateDescription vmstate_i440fx = {
.post_load = i440fx_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCII440FXState),
VMSTATE_UINT8(smm_enabled, PCII440FXState),
/* Used to be smm_enabled, which was basically always zero because
* SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code.
*/
VMSTATE_UNUSED(1),
VMSTATE_END_OF_LIST()
}
};
@@ -297,11 +293,7 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
static void i440fx_realize(PCIDevice *dev, Error **errp)
{
PCII440FXState *d = I440FX_PCI_DEVICE(dev);
dev->config[I440FX_SMRAM] = 0x02;
cpu_smm_register(&i440fx_set_smm, d);
}
PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
@@ -346,11 +338,23 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
pc_pci_as_mapping_init(OBJECT(f), f->system_memory,
f->pci_address_space);
/* if *disabled* show SMRAM to all CPUs */
memory_region_init_alias(&f->smram_region, OBJECT(d), "smram-region",
f->pci_address_space, 0xa0000, 0x20000);
memory_region_add_subregion_overlap(f->system_memory, 0xa0000,
&f->smram_region, 1);
memory_region_set_enabled(&f->smram_region, false);
memory_region_set_enabled(&f->smram_region, true);
/* smram, as seen by SMM CPUs */
memory_region_init(&f->smram, OBJECT(d), "smram", 1ull << 32);
memory_region_set_enabled(&f->smram, true);
memory_region_init_alias(&f->low_smram, OBJECT(d), "smram-low",
f->ram_memory, 0xa0000, 0x20000);
memory_region_set_enabled(&f->low_smram, true);
memory_region_add_subregion(&f->smram, 0xa0000, &f->low_smram);
object_property_add_const_link(qdev_get_machine(), "smram",
OBJECT(&f->smram), &error_abort);
init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space,
&f->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE);
for (i = 0; i < 12; ++i) {


@@ -198,6 +198,28 @@ static const TypeInfo q35_host_info = {
* MCH D0:F0
*/
static uint64_t tseg_blackhole_read(void *ptr, hwaddr reg, unsigned size)
{
return 0xffffffff;
}
static void tseg_blackhole_write(void *opaque, hwaddr addr, uint64_t val,
unsigned width)
{
/* nothing */
}
static const MemoryRegionOps tseg_blackhole_ops = {
.read = tseg_blackhole_read,
.write = tseg_blackhole_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.valid.min_access_size = 1,
.valid.max_access_size = 4,
.impl.min_access_size = 4,
.impl.max_access_size = 4,
.endianness = DEVICE_LITTLE_ENDIAN,
};
/* PCIe MMCFG */
static void mch_update_pciexbar(MCHPCIState *mch)
{
@@ -266,21 +288,70 @@ static void mch_update_pam(MCHPCIState *mch)
static void mch_update_smram(MCHPCIState *mch)
{
PCIDevice *pd = PCI_DEVICE(mch);
bool h_smrame = (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME);
uint32_t tseg_size;
/* implement SMRAM.D_LCK */
if (pd->config[MCH_HOST_BRIDGE_SMRAM] & MCH_HOST_BRIDGE_SMRAM_D_LCK) {
pd->config[MCH_HOST_BRIDGE_SMRAM] &= ~MCH_HOST_BRIDGE_SMRAM_D_OPEN;
pd->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK_LCK;
pd->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK;
}
memory_region_transaction_begin();
smram_update(&mch->smram_region, pd->config[MCH_HOST_BRIDGE_SMRAM],
mch->smm_enabled);
memory_region_transaction_commit();
}
static void mch_set_smm(int smm, void *arg)
{
MCHPCIState *mch = arg;
PCIDevice *pd = PCI_DEVICE(mch);
if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_D_OPEN) {
/* Hide (!) low SMRAM if H_SMRAME = 1 */
memory_region_set_enabled(&mch->smram_region, h_smrame);
/* Show high SMRAM if H_SMRAME = 1 */
memory_region_set_enabled(&mch->open_high_smram, h_smrame);
} else {
/* Hide high SMRAM and low SMRAM */
memory_region_set_enabled(&mch->smram_region, true);
memory_region_set_enabled(&mch->open_high_smram, false);
}
if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME) {
memory_region_set_enabled(&mch->low_smram, !h_smrame);
memory_region_set_enabled(&mch->high_smram, h_smrame);
} else {
memory_region_set_enabled(&mch->low_smram, false);
memory_region_set_enabled(&mch->high_smram, false);
}
if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) {
switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) {
case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB:
tseg_size = 1024 * 1024;
break;
case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB:
tseg_size = 1024 * 1024 * 2;
break;
case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB:
tseg_size = 1024 * 1024 * 8;
break;
default:
tseg_size = 0;
break;
}
} else {
tseg_size = 0;
}
memory_region_del_subregion(mch->system_memory, &mch->tseg_blackhole);
memory_region_set_enabled(&mch->tseg_blackhole, tseg_size);
memory_region_set_size(&mch->tseg_blackhole, tseg_size);
memory_region_add_subregion_overlap(mch->system_memory,
mch->below_4g_mem_size - tseg_size,
&mch->tseg_blackhole, 1);
memory_region_set_enabled(&mch->tseg_window, tseg_size);
memory_region_set_size(&mch->tseg_window, tseg_size);
memory_region_set_address(&mch->tseg_window,
mch->below_4g_mem_size - tseg_size);
memory_region_set_alias_offset(&mch->tseg_window,
mch->below_4g_mem_size - tseg_size);
memory_region_transaction_begin();
smram_set_smm(&mch->smm_enabled, smm, pd->config[MCH_HOST_BRIDGE_SMRAM],
&mch->smram_region);
memory_region_transaction_commit();
}
@@ -289,7 +360,6 @@ static void mch_write_config(PCIDevice *d,
{
MCHPCIState *mch = MCH_PCI_DEVICE(d);
/* XXX: implement SMRAM.D_LOCK */
pci_default_write_config(d, address, val, len);
if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PAM0,
@@ -329,7 +399,10 @@ static const VMStateDescription vmstate_mch = {
.post_load = mch_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState),
VMSTATE_UINT8(smm_enabled, MCHPCIState),
/* Used to be smm_enabled, which was basically always zero because
* SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code.
*/
VMSTATE_UNUSED(1),
VMSTATE_END_OF_LIST()
}
};
@@ -343,6 +416,9 @@ static void mch_reset(DeviceState *qdev)
MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT);
d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT;
d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT;
d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK;
d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK;
mch_update(mch);
}
@@ -399,13 +475,47 @@ static void mch_realize(PCIDevice *d, Error **errp)
pc_pci_as_mapping_init(OBJECT(mch), mch->system_memory,
mch->pci_address_space);
/* smram */
cpu_smm_register(&mch_set_smm, mch);
/* if *disabled* show SMRAM to all CPUs */
memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
mch->pci_address_space, 0xa0000, 0x20000);
memory_region_add_subregion_overlap(mch->system_memory, 0xa0000,
&mch->smram_region, 1);
memory_region_set_enabled(&mch->smram_region, false);
memory_region_set_enabled(&mch->smram_region, true);
memory_region_init_alias(&mch->open_high_smram, OBJECT(mch), "smram-open-high",
mch->ram_memory, 0xa0000, 0x20000);
memory_region_add_subregion_overlap(mch->system_memory, 0xfeda0000,
&mch->open_high_smram, 1);
memory_region_set_enabled(&mch->open_high_smram, false);
/* smram, as seen by SMM CPUs */
memory_region_init(&mch->smram, OBJECT(mch), "smram", 1ull << 32);
memory_region_set_enabled(&mch->smram, true);
memory_region_init_alias(&mch->low_smram, OBJECT(mch), "smram-low",
mch->ram_memory, 0xa0000, 0x20000);
memory_region_set_enabled(&mch->low_smram, true);
memory_region_add_subregion(&mch->smram, 0xa0000, &mch->low_smram);
memory_region_init_alias(&mch->high_smram, OBJECT(mch), "smram-high",
mch->ram_memory, 0xa0000, 0x20000);
memory_region_set_enabled(&mch->high_smram, true);
memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram);
memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch),
&tseg_blackhole_ops, NULL,
"tseg-blackhole", 0);
memory_region_set_enabled(&mch->tseg_blackhole, false);
memory_region_add_subregion_overlap(mch->system_memory,
mch->below_4g_mem_size,
&mch->tseg_blackhole, 1);
memory_region_init_alias(&mch->tseg_window, OBJECT(mch), "tseg-window",
mch->ram_memory, mch->below_4g_mem_size, 0);
memory_region_set_enabled(&mch->tseg_window, false);
memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size,
&mch->tseg_window);
object_property_add_const_link(qdev_get_machine(), "smram",
OBJECT(&mch->smram), &error_abort);
init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory,
mch->pci_address_space, &mch->pam_regions[0],
PAM_BIOS_BASE, PAM_BIOS_SIZE);


@@ -42,7 +42,7 @@ static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
}
/* Ignore regions with dirty logging, we cannot mark them dirty */
if (memory_region_is_logging(section.mr)) {
if (memory_region_get_dirty_log_mask(section.mr)) {
goto out;
}


@@ -416,7 +416,8 @@ static void vhost_set_memory(MemoryListener *listener,
memory_listener);
hwaddr start_addr = section->offset_within_address_space;
ram_addr_t size = int128_get64(section->size);
bool log_dirty = memory_region_is_logging(section->mr);
bool log_dirty =
memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
int s = offsetof(struct vhost_memory, regions) +
(dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
void *ram;
@@ -675,13 +676,15 @@ static void vhost_log_global_stop(MemoryListener *listener)
}
static void vhost_log_start(MemoryListener *listener,
MemoryRegionSection *section)
MemoryRegionSection *section,
int old, int new)
{
/* FIXME: implement */
}
static void vhost_log_stop(MemoryListener *listener,
MemoryRegionSection *section)
MemoryRegionSection *section,
int old, int new)
{
/* FIXME: implement */
}


@@ -22,8 +22,7 @@
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
target_ulong vaddr);
void tlb_unprotect_code(ram_addr_t ram_addr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
uintptr_t length);
void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);


@@ -90,11 +90,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);


@@ -29,7 +29,9 @@ typedef struct MemTxAttrs {
* "didn't specify" if necessary.
*/
unsigned int unspecified:1;
/* ARM/AMBA TrustZone Secure access */
/* ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
*/
unsigned int secure:1;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;


@@ -206,8 +206,10 @@ struct MemoryListener {
void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
int old, int new);
void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
int old, int new);
void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_global_start)(MemoryListener *listener);
void (*log_global_stop)(MemoryListener *listener);
@@ -591,11 +593,23 @@ const char *memory_region_name(const MemoryRegion *mr);
/**
* memory_region_is_logging: return whether a memory region is logging writes
*
* Returns %true if the memory region is logging writes
* Returns %true if the memory region is logging writes for the given client
*
* @mr: the memory region being queried
* @client: the client being queried
*/
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
/**
* memory_region_get_dirty_log_mask: return the clients for which a
* memory region is logging writes.
*
* Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
* are the bit indices.
*
* @mr: the memory region being queried
*/
bool memory_region_is_logging(MemoryRegion *mr);
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
/**
* memory_region_is_rom: check whether a memory region is ROM
@@ -647,8 +661,7 @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
*
* @mr: the memory region being updated.
* @log: whether dirty logging is to be enabled or disabled.
* @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
* %DIRTY_MEMORY_VGA.
* @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
*/
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
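
A sketch of a listener written against the new log_start/log_stop signatures, assuming (as the vhost hunk below suggests) that old and new carry the region's previous and next dirty-log masks; the listener itself is hypothetical:

static void my_log_start(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    /* React only when the VGA client bit flips on for this section. */
    if (!(old & (1 << DIRTY_MEMORY_VGA)) &&
        (new & (1 << DIRTY_MEMORY_VGA))) {
        /* begin tracking writes to section->mr */
    }
}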


@@ -41,6 +41,9 @@ void qemu_ram_free_from_ptr(ram_addr_t addr);
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
@@ -56,7 +59,7 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
return next < end;
}
static inline bool cpu_physical_memory_get_clean(ram_addr_t start,
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
@@ -68,7 +71,7 @@ static inline bool cpu_physical_memory_get_clean(ram_addr_t start,
page = start >> TARGET_PAGE_BITS;
next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);
return next < end;
return next >= end;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
@@ -86,44 +89,52 @@ static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
return !(vga && code && migration);
}
static inline bool cpu_physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length)
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
bool vga = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_VGA);
bool code = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_CODE);
bool migration =
cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_MIGRATION);
return vga || code || migration;
uint8_t ret = 0;
if (mask & (1 << DIRTY_MEMORY_VGA) &&
!cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
ret |= (1 << DIRTY_MEMORY_VGA);
}
if (mask & (1 << DIRTY_MEMORY_CODE) &&
!cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
ret |= (1 << DIRTY_MEMORY_CODE);
}
if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
!cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
ret |= (1 << DIRTY_MEMORY_MIGRATION);
}
return ret;
}
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
unsigned client)
{
assert(client < DIRTY_MEMORY_NUM);
set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}
static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
ram_addr_t length)
{
unsigned long end, page;
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
ram_addr_t length)
ram_addr_t length,
uint8_t mask)
{
unsigned long end, page;
unsigned long **d = ram_list.dirty_memory;
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
}
if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
}
if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
}
xen_modified_memory(start, length);
}
@@ -149,14 +160,18 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
unsigned long **d = ram_list.dirty_memory;
ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
if (tcg_enabled()) {
atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
}
}
}
xen_modified_memory(start, pages);
xen_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
@@ -171,7 +186,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
addr = page_number * TARGET_PAGE_SIZE;
ram_addr = start + addr;
cpu_physical_memory_set_dirty_range(ram_addr,
TARGET_PAGE_SIZE * hpratio);
TARGET_PAGE_SIZE * hpratio, clients);
} while (c != 0);
}
}
@@ -179,29 +194,60 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
#endif /* not _WIN32 */
static inline void cpu_physical_memory_clear_dirty_range_type(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
unsigned long end, page;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
ram_addr_t length)
{
cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_MIGRATION);
cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_VGA);
cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_CODE);
cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
unsigned client);
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
ram_addr_t start,
ram_addr_t length)
{
ram_addr_t addr;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
uint64_t num_dirty = 0;
/* start address is aligned at the start of a word? */
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
for (k = page; k < page + nr; k++) {
if (src[k]) {
unsigned long bits = atomic_xchg(&src[k], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
num_dirty += ctpopl(new_dirty);
}
}
} else {
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
start + addr,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;
}
}
}
}
return num_dirty;
}
#endif
#endif


@@ -39,6 +39,7 @@ typedef struct ICH9LPCPMRegs {
MemoryRegion io_smi;
uint32_t smi_en;
uint32_t smi_en_wmask;
uint32_t smi_sts;
qemu_irq irq; /* SCI */


@@ -152,6 +152,12 @@ Object *ich9_lpc_find(void);
#define ICH9_LPC_PIRQ_ROUT_MASK Q35_MASK(8, 3, 0)
#define ICH9_LPC_PIRQ_ROUT_DEFAULT 0x80
#define ICH9_LPC_GEN_PMCON_1 0xa0
#define ICH9_LPC_GEN_PMCON_1_SMI_LOCK (1 << 4)
#define ICH9_LPC_GEN_PMCON_2 0xa2
#define ICH9_LPC_GEN_PMCON_3 0xa4
#define ICH9_LPC_GEN_PMCON_LOCK 0xa6
#define ICH9_LPC_RCBA 0xf0
#define ICH9_LPC_RCBA_BA_MASK Q35_MASK(32, 31, 14)
#define ICH9_LPC_RCBA_EN 0x1


@@ -210,7 +210,6 @@ void pc_nic_init(ISABus *isa_bus, PCIBus *pci_bus);
void pc_pci_device_init(PCIBus *pci_bus);
typedef void (*cpu_set_smm_t)(int smm, void *arg);
void cpu_smm_register(cpu_set_smm_t callback, void *arg);
void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name);


@@ -86,10 +86,6 @@ typedef struct PAMMemoryRegion {
unsigned current;
} PAMMemoryRegion;
void smram_update(MemoryRegion *smram_region, uint8_t smram,
uint8_t smm_enabled);
void smram_set_smm(uint8_t *host_smm_enabled, int smm, uint8_t smram,
MemoryRegion *smram_region);
void init_pam(DeviceState *dev, MemoryRegion *ram, MemoryRegion *system,
MemoryRegion *pci, PAMMemoryRegion *mem, uint32_t start, uint32_t size);
void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val);


@ -52,9 +52,10 @@ typedef struct MCHPCIState {
MemoryRegion *system_memory;
MemoryRegion *address_space_io;
PAMMemoryRegion pam_regions[13];
MemoryRegion smram_region;
MemoryRegion smram_region, open_high_smram;
MemoryRegion smram, low_smram, high_smram;
MemoryRegion tseg_blackhole, tseg_window;
PcPciInfo pci_info;
uint8_t smm_enabled;
ram_addr_t below_4g_mem_size;
ram_addr_t above_4g_mem_size;
uint64_t pci_hole64_size;
@ -127,8 +128,7 @@ typedef struct Q35PCIHost {
#define MCH_HOST_BRIDGE_PAM_MASK ((uint8_t)0x3)
#define MCH_HOST_BRIDGE_SMRAM 0x9d
#define MCH_HOST_BRIDGE_SMRAM_SIZE 1
#define MCH_HOST_BRIDGE_SMRAM_DEFAULT ((uint8_t)0x2)
#define MCH_HOST_BRIDGE_SMRAM_SIZE 2
#define MCH_HOST_BRIDGE_SMRAM_D_OPEN ((uint8_t)(1 << 6))
#define MCH_HOST_BRIDGE_SMRAM_D_CLS ((uint8_t)(1 << 5))
#define MCH_HOST_BRIDGE_SMRAM_D_LCK ((uint8_t)(1 << 4))
@ -139,18 +139,36 @@ typedef struct Q35PCIHost {
#define MCH_HOST_BRIDGE_SMRAM_C_END 0xc0000
#define MCH_HOST_BRIDGE_SMRAM_C_SIZE 0x20000
#define MCH_HOST_BRIDGE_UPPER_SYSTEM_BIOS_END 0x100000
#define MCH_HOST_BRIDGE_SMRAM_DEFAULT \
MCH_HOST_BRIDGE_SMRAM_C_BASE_SEG
#define MCH_HOST_BRIDGE_SMRAM_WMASK \
(MCH_HOST_BRIDGE_SMRAM_D_OPEN | \
MCH_HOST_BRIDGE_SMRAM_D_CLS | \
MCH_HOST_BRIDGE_SMRAM_D_LCK | \
MCH_HOST_BRIDGE_SMRAM_G_SMRAME)
#define MCH_HOST_BRIDGE_SMRAM_WMASK_LCK \
MCH_HOST_BRIDGE_SMRAM_D_CLS
#define MCH_HOST_BRIDGE_ESMRAMC 0x9e
#define MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME ((uint8_t)(1 << 6))
#define MCH_HOST_BRIDGE_ESMRAMC_E_SMERR ((uint8_t)(1 << 5))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_CACHE ((uint8_t)(1 << 4))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L1 ((uint8_t)(1 << 3))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L2 ((uint8_t)(1 << 2))
#define MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME ((uint8_t)(1 << 7))
#define MCH_HOST_BRIDGE_ESMRAMC_E_SMERR ((uint8_t)(1 << 6))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_CACHE ((uint8_t)(1 << 5))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L1 ((uint8_t)(1 << 4))
#define MCH_HOST_BRIDGE_ESMRAMC_SM_L2 ((uint8_t)(1 << 3))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK ((uint8_t)(0x3 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB ((uint8_t)(0x0 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB ((uint8_t)(0x1 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB ((uint8_t)(0x2 << 1))
#define MCH_HOST_BRIDGE_ESMRAMC_T_EN ((uint8_t)1)
#define MCH_HOST_BRIDGE_ESMRAMC_DEFAULT \
(MCH_HOST_BRIDGE_ESMRAMC_SM_CACHE | \
MCH_HOST_BRIDGE_ESMRAMC_SM_L1 | \
MCH_HOST_BRIDGE_ESMRAMC_SM_L2)
#define MCH_HOST_BRIDGE_ESMRAMC_WMASK \
(MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME | \
MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK | \
MCH_HOST_BRIDGE_ESMRAMC_T_EN)
#define MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK 0
/* D1:F0 PCIE* port*/
#define MCH_PCIE_DEV 1


@ -99,7 +99,13 @@
#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)
/* __atomic_thread_fence does not include a compiler barrier; instead,
* the barrier is part of __atomic_load/__atomic_store's "volatile-like"
* semantics. If smp_wmb() is a no-op, absence of the barrier means that
* the compiler is free to reorder stores on each side of the barrier.
* Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
*/
#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#else
#define smp_wmb() __sync_synchronize()
#endif
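/* A sketch of the hazard described above (hypothetical code, not part of
 * the patch): if the fence expands to no machine instruction on a strongly
 * ordered host, only the explicit barrier() keeps the compiler from moving
 * the plain stores across it. */
extern int data, flag;
static void publish(void)
{
    data = 1;   /* plain store: the fence alone does not pin it */
    smp_wmb();  /* barrier() + fence: no compiler reordering across this */
    flag = 1;   /* a reader that sees flag == 1 must also see data == 1 */
}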
@ -107,7 +113,7 @@
#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
#else
#define smp_rmb() __sync_synchronize()
#endif
@ -115,7 +121,7 @@
#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends() __atomic_thread_fence(__ATOMIC_CONSUME)
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
#else
#define smp_read_barrier_depends() barrier()
#endif


@ -39,7 +39,9 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
*/
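/* A minimal usage sketch of the two atomic helpers (illustrative, not
 * part of the patch): */
static inline bool bitmap_atomic_demo(void)
{
    unsigned long map[BITS_TO_LONGS(64)] = { 0 };
    bitmap_set_atomic(map, 10, 4);  /* set bits 10..13; racing setters are safe */
    /* returns true: at least one bit in the range was set; all are clear now */
    return bitmap_test_and_clear_atomic(map, 10, 4);
}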
@ -226,7 +228,9 @@ static inline int bitmap_intersects(const unsigned long *src1,
}
void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,


@ -16,6 +16,7 @@
#include <assert.h>
#include "host-utils.h"
#include "atomic.h"
#define BITS_PER_BYTE CHAR_BIT
#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
@ -38,6 +39,19 @@ static inline void set_bit(long nr, unsigned long *addr)
*p |= mask;
}
/**
* set_bit_atomic - Set a bit in memory atomically
* @nr: the bit to set
* @addr: the address to start counting from
*/
static inline void set_bit_atomic(long nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = addr + BIT_WORD(nr);
atomic_or(p, mask);
}
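/* Sketch: concurrent callers on the same word cannot lose updates. */
static inline void set_bit_atomic_demo(unsigned long *word)
{
    set_bit_atomic(3, word);    /* e.g. thread A */
    set_bit_atomic(5, word);    /* e.g. thread B: both bits end up set */
}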
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear


@ -1289,6 +1289,24 @@ void object_property_add_alias(Object *obj, const char *name,
Object *target_obj, const char *target_name,
Error **errp);
/**
* object_property_add_const_link:
* @obj: the object to add a property to
* @name: the name of the property
* @target: the object to be referred by the link
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an unmodifiable link for a property on an object. This function will
* add a property of type link<TYPE> where TYPE is the type of @target.
*
* The caller must ensure that @target stays alive as long as
* this property exists. In the case @target is a child of @obj,
* this will be the case. Otherwise, the caller is responsible for
* taking a reference.
*/
void object_property_add_const_link(Object *obj, const char *name,
Object *target, Error **errp);
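/* Usage sketch: mch is a hypothetical device embedding a MemoryRegion
 * named smram; this series links SMRAM at /machine/smram this way. */
object_property_add_const_link(qdev_get_machine(), "smram",
                               OBJECT(&mch->smram), &error_abort);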
/**
* object_property_set_description:
* @obj: the object owning the property


@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other size, if
/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
* should we trigger an event? */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)


@ -241,10 +241,6 @@ void dpy_text_resize(QemuConsole *con, int w, int h);
void dpy_mouse_set(QemuConsole *con, int x, int y, int on);
void dpy_cursor_define(QemuConsole *con, QEMUCursor *cursor);
bool dpy_cursor_define_supported(QemuConsole *con);
void dpy_gfx_update_dirty(QemuConsole *con,
MemoryRegion *address_space,
uint64_t base,
bool invalidate);
bool dpy_gfx_check_format(QemuConsole *con,
pixman_format_code_t format);


@ -83,7 +83,6 @@ struct KVMState
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
bool coalesced_flush_in_progress;
int broken_set_mem_region;
int migration_log;
int vcpu_events;
int robust_singlestep;
int debugregs;
@ -234,9 +233,6 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
mem.guest_phys_addr = slot->start_addr;
mem.userspace_addr = (unsigned long)slot->ram;
mem.flags = slot->flags;
if (s->migration_log) {
mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
}
if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
/* Set the slot size to 0 before setting the slot to the desired
@ -317,10 +313,6 @@ static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
mem->flags = flags;
/* If nothing changed effectively, no need to issue ioctl */
if (s->migration_log) {
flags |= KVM_MEM_LOG_DIRTY_PAGES;
}
if (flags == old_flags) {
return 0;
}
@ -335,19 +327,22 @@ static int kvm_dirty_pages_log_change(hwaddr phys_addr,
KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
if (mem == NULL) {
fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
TARGET_FMT_plx "\n", __func__, phys_addr,
(hwaddr)(phys_addr + size - 1));
return -EINVAL;
return 0;
} else {
return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}
return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}
static void kvm_log_start(MemoryListener *listener,
MemoryRegionSection *section)
MemoryRegionSection *section,
int old, int new)
{
int r;
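/* old/new carry the section's previous and next dirty-log masks; only
 * the first client to enable logging (old == 0) needs the KVM ioctl. */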
if (old != 0) {
return;
}
r = kvm_dirty_pages_log_change(section->offset_within_address_space,
int128_get64(section->size), true);
if (r < 0) {
@ -356,10 +351,15 @@ static void kvm_log_start(MemoryListener *listener,
}
static void kvm_log_stop(MemoryListener *listener,
MemoryRegionSection *section)
MemoryRegionSection *section,
int old, int new)
{
int r;
if (new != 0) {
return;
}
r = kvm_dirty_pages_log_change(section->offset_within_address_space,
int128_get64(section->size), false);
if (r < 0) {
@ -367,31 +367,6 @@ static void kvm_log_stop(MemoryListener *listener,
}
}
static int kvm_set_migration_log(bool enable)
{
KVMState *s = kvm_state;
KVMSlot *mem;
int i, err;
s->migration_log = enable;
for (i = 0; i < s->nr_slots; i++) {
mem = &s->slots[i];
if (!mem->memory_size) {
continue;
}
if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
continue;
}
err = kvm_set_user_memory_region(s, mem);
if (err) {
return err;
}
}
return 0;
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
unsigned long *bitmap)
@ -663,7 +638,7 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
KVMSlot *mem, old;
int err;
MemoryRegion *mr = section->mr;
bool log_dirty = memory_region_is_logging(mr);
bool log_dirty = memory_region_get_dirty_log_mask(mr) != 0;
bool writeable = !mr->readonly && !mr->rom_device;
bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
hwaddr start_addr = section->offset_within_address_space;
@ -715,7 +690,7 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
old = *mem;
if ((mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || s->migration_log) {
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_physical_sync_dirty_bitmap(section);
}
@ -844,22 +819,6 @@ static void kvm_log_sync(MemoryListener *listener,
}
}
static void kvm_log_global_start(struct MemoryListener *listener)
{
int r;
r = kvm_set_migration_log(1);
assert(r >= 0);
}
static void kvm_log_global_stop(struct MemoryListener *listener)
{
int r;
r = kvm_set_migration_log(0);
assert(r >= 0);
}
static void kvm_mem_ioeventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
bool match_data, uint64_t data,
@ -935,8 +894,6 @@ static MemoryListener kvm_memory_listener = {
.log_start = kvm_log_start,
.log_stop = kvm_log_stop,
.log_sync = kvm_log_sync,
.log_global_start = kvm_log_global_start,
.log_global_stop = kvm_log_global_stop,
.eventfd_add = kvm_mem_ioeventfd_add,
.eventfd_del = kvm_mem_ioeventfd_del,
.coalesced_mmio_add = kvm_coalesce_mmio_region,
@ -1828,6 +1785,14 @@ int kvm_cpu_exec(CPUState *cpu)
}
fprintf(stderr, "error: kvm run failed %s\n",
strerror(-run_ret));
#ifdef TARGET_PPC
if (run_ret == -EBUSY) {
fprintf(stderr,
"This is probably because your SMT is enabled.\n"
"VCPU can only run on primary threads with all "
"secondary threads offline.\n");
}
#endif
ret = -1;
break;
}


@ -106,6 +106,8 @@ struct kvm_ioapic_state {
#define KVM_IRQCHIP_IOAPIC 2
#define KVM_NR_IRQCHIPS 3
#define KVM_RUN_X86_SMM (1 << 0)
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
@ -281,6 +283,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@ -309,7 +312,13 @@ struct kvm_vcpu_events {
} nmi;
__u32 sipi_vector;
__u32 flags;
__u32 reserved[10];
struct {
__u8 smm;
__u8 pending;
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
__u32 reserved[9];
};
/* for KVM_GET/SET_DEBUGREGS */
@ -345,4 +354,7 @@ struct kvm_xcrs {
struct kvm_sync_regs {
};
#define KVM_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_QUIRK_CD_NW_CLEARED (1 << 1)
#endif /* _ASM_X86_KVM_H */


@ -202,7 +202,7 @@ struct kvm_run {
__u32 exit_reason;
__u8 ready_for_interrupt_injection;
__u8 if_flag;
__u8 padding2[2];
__u16 flags;
/* in (pre_kvm_run), out (post_kvm_run) */
__u64 cr8;
@ -814,6 +814,9 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_S390_INJECT_IRQ 113
#define KVM_CAP_S390_IRQ_STATE 114
#define KVM_CAP_PPC_HWRNG 115
#define KVM_CAP_DISABLE_QUIRKS 116
#define KVM_CAP_X86_SMM 117
#define KVM_CAP_MULTI_ADDRESS_SPACE 118
#ifdef KVM_CAP_IRQ_ROUTING
@ -1199,6 +1202,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_IRQ_STATE */
#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
/* Available with KVM_CAP_X86_SMM */
#define KVM_SMI _IO(KVMIO, 0xb7)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)


@ -215,10 +215,6 @@ void cpu_list_unlock(void)
/***********************************************************/
/* CPUX86 core interface */
void cpu_smm_update(CPUX86State *env)
{
}
uint64_t cpu_get_tsc(CPUX86State *env)
{
return cpu_get_real_ticks();


@ -30,6 +30,7 @@
#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"
//#define DEBUG_MMAP
@ -574,7 +575,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
page_dump(stdout);
printf("\n");
#endif
tb_invalidate_phys_range(start, start + len, 0);
tb_invalidate_phys_range(start, start + len);
mmap_unlock();
return start;
fail:
@ -679,7 +680,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (ret == 0) {
page_set_flags(start, start + len, 0);
tb_invalidate_phys_range(start, start + len, 0);
tb_invalidate_phys_range(start, start + len);
}
mmap_unlock();
return ret;
@ -758,7 +759,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
page_set_flags(old_addr, old_addr + old_size, 0);
page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
}
tb_invalidate_phys_range(new_addr, new_addr + new_size, 0);
tb_invalidate_phys_range(new_addr, new_addr + new_size);
mmap_unlock();
return new_addr;
}


@ -28,6 +28,8 @@
//#define DEBUG_UNASSIGNED
#define RAM_ADDR_INVALID (~(ram_addr_t)0)
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
@ -152,7 +154,7 @@ static bool memory_listener_match(MemoryListener *listener,
} while (0)
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \
.mr = (fr)->mr, \
.address_space = (as), \
@ -160,7 +162,7 @@ static bool memory_listener_match(MemoryListener *listener,
.size = (fr)->addr.size, \
.offset_within_address_space = int128_get64((fr)->addr.start), \
.readonly = (fr)->readonly, \
}))
}), ##_args)
struct CoalescedMemoryRange {
AddrRange addr;
@ -588,7 +590,7 @@ static void render_memory_region(FlatView *view,
remain = clip.size;
fr.mr = mr;
fr.dirty_log_mask = mr->dirty_log_mask;
fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
fr.romd_mode = mr->romd_mode;
fr.readonly = readonly;
@ -774,10 +776,15 @@ static void address_space_update_topology_pass(AddressSpace *as,
if (adding) {
MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
} else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
frold->dirty_log_mask,
frnew->dirty_log_mask);
}
if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
frold->dirty_log_mask,
frnew->dirty_log_mask);
}
}
@ -1002,6 +1009,7 @@ static void memory_region_initfn(Object *obj)
ObjectProperty *op;
mr->ops = &unassigned_mem_ops;
mr->ram_addr = RAM_ADDR_INVALID;
mr->enabled = true;
mr->romd_mode = true;
mr->destructor = memory_region_destructor_none;
@ -1193,7 +1201,6 @@ void memory_region_init_io(MemoryRegion *mr,
mr->ops = ops;
mr->opaque = opaque;
mr->terminates = true;
mr->ram_addr = ~(ram_addr_t)0;
}
void memory_region_init_ram(MemoryRegion *mr,
@ -1207,6 +1214,7 @@ void memory_region_init_ram(MemoryRegion *mr,
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_addr = qemu_ram_alloc(size, mr, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
@ -1224,6 +1232,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#ifdef __linux__
@ -1240,6 +1249,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
@ -1253,6 +1263,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram_from_ptr;
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
/* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
assert(ptr != NULL);
@ -1389,9 +1400,18 @@ bool memory_region_is_skip_dump(MemoryRegion *mr)
return mr->skip_dump;
}
bool memory_region_is_logging(MemoryRegion *mr)
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
return mr->dirty_log_mask;
uint8_t mask = mr->dirty_log_mask;
if (global_dirty_log) {
mask |= (1 << DIRTY_MEMORY_MIGRATION);
}
return mask;
}
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
bool memory_region_is_rom(MemoryRegion *mr)
@ -1425,6 +1445,7 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
uint8_t mask = 1 << client;
assert(client == DIRTY_MEMORY_VGA);
memory_region_transaction_begin();
mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
memory_region_update_pending |= mr->enabled;
@ -1434,27 +1455,24 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client)
{
assert(mr->terminates);
assert(mr->ram_addr != RAM_ADDR_INVALID);
return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
}
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size)
{
assert(mr->terminates);
cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
assert(mr->ram_addr != RAM_ADDR_INVALID);
cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size,
memory_region_get_dirty_log_mask(mr));
}
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client)
{
bool ret;
assert(mr->terminates);
ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
if (ret) {
cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
}
return ret;
assert(mr->ram_addr != RAM_ADDR_INVALID);
return cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr,
size, client);
}
@ -1497,8 +1515,9 @@ void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client)
{
assert(mr->terminates);
cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
assert(mr->ram_addr != RAM_ADDR_INVALID);
cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, size,
client);
}
int memory_region_get_fd(MemoryRegion *mr)
@ -1507,7 +1526,7 @@ int memory_region_get_fd(MemoryRegion *mr)
return memory_region_get_fd(mr->alias);
}
assert(mr->terminates);
assert(mr->ram_addr != RAM_ADDR_INVALID);
return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK);
}
@ -1518,14 +1537,14 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr)
return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
}
assert(mr->terminates);
assert(mr->ram_addr != RAM_ADDR_INVALID);
return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
assert(mr->terminates);
assert(mr->ram_addr != RAM_ADDR_INVALID);
qemu_ram_resize(mr->ram_addr, newsize, errp);
}
@ -1947,12 +1966,24 @@ void address_space_sync_dirty_bitmap(AddressSpace *as)
void memory_global_dirty_log_start(void)
{
global_dirty_log = true;
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
/* Refresh DIRTY_LOG_MIGRATION bit. */
memory_region_transaction_begin();
memory_region_update_pending = true;
memory_region_transaction_commit();
}
void memory_global_dirty_log_stop(void)
{
global_dirty_log = false;
/* Refresh DIRTY_LOG_MIGRATION bit. */
memory_region_transaction_begin();
memory_region_update_pending = true;
memory_region_transaction_commit();
MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}


@ -822,15 +822,19 @@ int qemu_global_option(const char *str)
QemuOpts *opts;
int rc, offset;
rc = sscanf(str, "%63[^.].%63[^=]%n", driver, property, &offset);
if (rc < 2 || str[offset] != '=') {
error_report("can't parse: \"%s\"", str);
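/* "%63[^.=]" stops at '.' or '=', so a "driver=...,property=..." string
 * fails the '.' match and is handed to the full QemuOpts parser below. */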
rc = sscanf(str, "%63[^.=].%63[^=]%n", driver, property, &offset);
if (rc == 2 && str[offset] == '=') {
opts = qemu_opts_create(&qemu_global_opts, NULL, 0, &error_abort);
qemu_opt_set(opts, "driver", driver, &error_abort);
qemu_opt_set(opts, "property", property, &error_abort);
qemu_opt_set(opts, "value", str + offset + 1, &error_abort);
return 0;
}
opts = qemu_opts_parse(&qemu_global_opts, str, false);
if (!opts) {
return -1;
}
opts = qemu_opts_create(&qemu_global_opts, NULL, 0, &error_abort);
qemu_opt_set(opts, "driver", driver, &error_abort);
qemu_opt_set(opts, "property", property, &error_abort);
qemu_opt_set(opts, "value", str + offset + 1, &error_abort);
return 0;
}


@ -53,6 +53,7 @@ static int persistent = 0;
static enum { RUNNING, TERMINATE, TERMINATING, TERMINATED } state;
static int shared = 1;
static int nb_fds;
static int server_fd;
static void usage(const char *name)
{
@ -340,7 +341,7 @@ out:
return (void *) EXIT_FAILURE;
}
static int nbd_can_accept(void *opaque)
static int nbd_can_accept(void)
{
return nb_fds < shared;
}
@ -351,19 +352,21 @@ static void nbd_export_closed(NBDExport *exp)
state = TERMINATED;
}
static void nbd_update_server_fd_handler(int fd);
static void nbd_client_closed(NBDClient *client)
{
nb_fds--;
if (nb_fds == 0 && !persistent && state == RUNNING) {
state = TERMINATE;
}
nbd_update_server_fd_handler(server_fd);
qemu_notify_event();
nbd_client_put(client);
}
static void nbd_accept(void *opaque)
{
int server_fd = (uintptr_t) opaque;
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
@ -380,12 +383,22 @@ static void nbd_accept(void *opaque)
if (nbd_client_new(exp, fd, nbd_client_closed)) {
nb_fds++;
nbd_update_server_fd_handler(server_fd);
} else {
shutdown(fd, 2);
close(fd);
}
}
static void nbd_update_server_fd_handler(int fd)
{
if (nbd_can_accept()) {
qemu_set_fd_handler(fd, nbd_accept, NULL, (void *)(uintptr_t)fd);
} else {
qemu_set_fd_handler(fd, NULL, NULL, NULL);
}
}
int main(int argc, char **argv)
{
BlockBackend *blk;
@ -761,8 +774,8 @@ int main(int argc, char **argv)
memset(&client_thread, 0, sizeof(client_thread));
}
qemu_set_fd_handler2(fd, nbd_can_accept, nbd_accept, NULL,
(void *)(uintptr_t)fd);
server_fd = fd;
nbd_update_server_fd_handler(fd);
/* now when the initialization is (almost) complete, chdir("/")
* to free any busy filesystems */


@ -171,11 +171,13 @@ Set parameter @var{arg} for item @var{id} of type @var{group}\n"
ETEXI
DEF("global", HAS_ARG, QEMU_OPTION_global,
"-global driver.prop=value\n"
"-global driver.property=value\n"
"-global driver=driver,property=property,value=value\n"
" set a global default for a driver property\n",
QEMU_ARCH_ALL)
STEXI
@item -global @var{driver}.@var{prop}=@var{value}
@itemx -global driver=@var{driver},property=@var{property},value=@var{value}
@findex -global
Set default value of @var{driver}'s property @var{prop} to @var{value}, e.g.:
@ -186,6 +188,9 @@ qemu-system-i386 -global ide-drive.physical_block_size=4096 -drive file=file,if=
In particular, you can use this to set driver properties for devices which are
created automatically by the machine model. To create a device which is not
created automatically and set properties on it, use -@option{device}.
The two syntaxes are equivalent. The longer one works for drivers whose name
contains a dot.
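For instance, the following two command lines are equivalent (illustrative
values):
@example
qemu-system-i386 -global ide-drive.physical_block_size=4096
qemu-system-i386 -global driver=ide-drive,property=physical_block_size,value=4096
@end example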
ETEXI
DEF("boot", HAS_ARG, QEMU_OPTION_boot,
@ -3099,9 +3104,10 @@ re-inject them.
ETEXI
DEF("icount", HAS_ARG, QEMU_OPTION_icount, \
"-icount [shift=N|auto][,align=on|off]\n" \
"-icount [shift=N|auto][,align=on|off][,sleep=no]\n" \
" enable virtual instruction counter with 2^N clock ticks per\n" \
" instruction and enable aligning the host and virtual clocks\n", QEMU_ARCH_ALL)
" instruction, enable aligning the host and virtual clocks\n" \
" or disable real time cpu sleeping\n", QEMU_ARCH_ALL)
STEXI
@item -icount [shift=@var{N}|auto]
@findex -icount
@ -3110,6 +3116,13 @@ instruction every 2^@var{N} ns of virtual time. If @code{auto} is specified
then the virtual cpu speed will be automatically adjusted to keep virtual
time within a few seconds of real time.
When the virtual cpu is sleeping, the virtual time will advance at default
speed unless @option{sleep=no} is specified.
With @option{sleep=no}, the virtual time will jump to the next timer deadline
instantly whenever the virtual cpu goes to sleep and will not advance
if no timer is enabled. This behavior gives deterministic execution times from
the guest's point of view.
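For example, to combine a fixed 2^7 ns per instruction with deterministic
sleep behavior (illustrative):
@example
qemu-system-i386 -icount shift=7,sleep=no
@end example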
Note that while this option can give deterministic behavior, it does not
provide cycle accurate emulation. Modern CPUs contain superscalar out of
order cores with complex cache hierarchies. The number of instructions


@ -1266,6 +1266,22 @@ out:
g_free(full_type);
}
void object_property_add_const_link(Object *obj, const char *name,
Object *target, Error **errp)
{
char *link_type;
ObjectProperty *op;
link_type = g_strdup_printf("link<%s>", object_get_typename(target));
op = object_property_add(obj, name, link_type,
object_get_child_property, NULL,
NULL, target, errp);
if (op != NULL) {
op->resolve = object_resolve_child_property;
}
g_free(link_type);
}
gchar *object_get_canonical_path_component(Object *obj)
{
ObjectProperty *prop = NULL;


@ -5,5 +5,3 @@ obj-y += gdbstub.o
obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(CONFIG_LINUX_USER) += ioport-user.o
obj-$(CONFIG_BSD_USER) += ioport-user.o


@ -23,6 +23,7 @@
#include "qom/cpu.h"
#include "cpu.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#ifdef TARGET_X86_64
#define TYPE_X86_CPU "x86_64-cpu"
@ -111,6 +112,8 @@ typedef struct X86CPU {
/* in order to simplify APIC support, we leave this pointer to the
user */
struct DeviceState *apic_state;
struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
Notifier machine_done;
} X86CPU;
static inline X86CPU *x86_env_get_cpu(CPUX86State *env)


@ -44,6 +44,7 @@
#include "hw/qdev-properties.h"
#include "hw/cpu/icc_bus.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif
@ -2750,6 +2751,21 @@ static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
errp);
}
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
X86CPU *cpu = container_of(n, X86CPU, machine_done);
MemoryRegion *smram =
(MemoryRegion *) object_resolve_path("/machine/smram", NULL);
if (smram) {
cpu->smram = g_new(MemoryRegion, 1);
memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
smram, 0, 1ull << 32);
memory_region_set_enabled(cpu->smram, false);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
}
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
@ -2811,6 +2827,32 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#endif
mce_init(cpu);
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
cs->as = g_new(AddressSpace, 1);
/* Outer container... */
memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
memory_region_set_enabled(cpu->cpu_as_root, true);
/* ... with two regions inside: normal system memory with low
* priority, and...
*/
memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
address_space_init(cs->as, cpu->cpu_as_root, "CPU");
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
qemu_add_machine_init_done_notifier(&cpu->machine_done);
}
#endif
qemu_init_vcpu(cs);
/* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
@ -2834,6 +2876,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
cpu_reset(cs);
xcc->parent_realize(dev, &local_err);
out:
if (local_err != NULL) {
error_propagate(errp, local_err);
@ -3063,7 +3106,9 @@ static bool x86_cpu_has_work(CPUState *cs)
(cs->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
CPU_INTERRUPT_SIPI |
CPU_INTERRUPT_MCE));
CPU_INTERRUPT_MCE)) ||
((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK));
}
static Property x86_cpu_properties[] = {


@ -180,15 +180,17 @@
/* hflags2 */
#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1
@ -1105,6 +1107,18 @@ int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
int is_write, int mmu_idx);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
{
return (dr7 >> (index * 2)) & 1;
@ -1143,7 +1157,6 @@ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
/* hw/pc.c */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_PAGE_BITS 12
@ -1292,6 +1305,11 @@ static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
}
}
static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}
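/* SMM mode is thus visible to the memory API as the "secure" transaction
 * attribute, both for TCG page-table walks and for the attributes returned
 * to KVM in kvm_arch_post_run(). */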
/* fpu_helper.c */
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
void cpu_set_fpuc(CPUX86State *env, uint16_t val);
@ -1304,7 +1322,9 @@ void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);
/* seg_helper.c */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
void cpu_smm_update(X86CPU *cpu);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);


@ -565,7 +565,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
pml4e = ldq_phys(cs->as, pml4e_addr);
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -574,12 +574,12 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
}
ptep = pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -589,7 +589,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
}
if (pdpe & PG_PSE_MASK) {
/* 1 GB page */
@ -604,7 +604,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -617,7 +617,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
pde = ldq_phys(cs->as, pde_addr);
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -635,11 +635,11 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
pte = ldq_phys(cs->as, pte_addr);
pte = x86_ldq_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -655,7 +655,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
env->a20_mask;
pde = ldl_phys(cs->as, pde_addr);
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -676,13 +676,13 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pde_addr, pde);
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
pte = ldl_phys(cs->as, pte_addr);
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
}
@ -737,7 +737,7 @@ do_check_protect_pse36:
if (is_dirty) {
pte |= PG_DIRTY_MASK;
}
stl_phys_notdirty(cs->as, pte_addr, pte);
x86_stl_phys_notdirty(cs, pte_addr, pte);
}
/* the page can be put in the TLB */
@ -771,7 +771,8 @@ do_check_protect_pse36:
page_offset = vaddr & (page_size - 1);
paddr = pte + page_offset;
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
prot, mmu_idx, page_size);
return 0;
do_fault_rsvd:
error_code |= PG_ERROR_RSVD_MASK;
@ -788,7 +789,7 @@ do_check_protect_pse36:
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
@ -827,13 +828,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
pml4e = ldq_phys(cs->as, pml4e_addr);
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
return -1;
}
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
(((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
return -1;
}
@ -848,14 +849,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
env->a20_mask;
pdpe = ldq_phys(cs->as, pdpe_addr);
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) +
(((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
pde = ldq_phys(cs->as, pde_addr);
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
return -1;
}
@ -868,7 +869,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
pte_addr = ((pde & PG_ADDRESS_MASK) +
(((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
page_size = 4096;
pte = ldq_phys(cs->as, pte_addr);
pte = x86_ldq_phys(cs, pte_addr);
}
if (!(pte & PG_PRESENT_MASK)) {
return -1;
@ -878,7 +879,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
pde = ldl_phys(cs->as, pde_addr);
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK))
return -1;
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@ -887,7 +888,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
} else {
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
pte = ldl_phys(cs->as, pte_addr);
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
return -1;
}
@ -1276,3 +1277,95 @@ void x86_cpu_exec_exit(CPUState *cs)
env->eflags = cpu_compute_eflags(env);
}
#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return address_space_ldub(cs->as, addr,
cpu_get_mem_attrs(env),
NULL);
}
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return address_space_lduw(cs->as, addr,
cpu_get_mem_attrs(env),
NULL);
}
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return address_space_ldl(cs->as, addr,
cpu_get_mem_attrs(env),
NULL);
}
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
return address_space_ldq(cs->as, addr,
cpu_get_mem_attrs(env),
NULL);
}
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
address_space_stb(cs->as, addr, val,
cpu_get_mem_attrs(env),
NULL);
}
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
address_space_stl_notdirty(cs->as, addr, val,
cpu_get_mem_attrs(env),
NULL);
}
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
address_space_stw(cs->as, addr, val,
cpu_get_mem_attrs(env),
NULL);
}
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
address_space_stl(cs->as, addr, val,
cpu_get_mem_attrs(env),
NULL);
}
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
address_space_stq(cs->as, addr, val,
cpu_get_mem_attrs(env),
NULL);
}
#endif


@ -86,12 +86,12 @@ DEF_HELPER_1(wrmsr, void, env)
DEF_HELPER_2(check_iob, void, env, i32)
DEF_HELPER_2(check_iow, void, env, i32)
DEF_HELPER_2(check_iol, void, env, i32)
DEF_HELPER_2(outb, void, i32, i32)
DEF_HELPER_1(inb, tl, i32)
DEF_HELPER_2(outw, void, i32, i32)
DEF_HELPER_1(inw, tl, i32)
DEF_HELPER_2(outl, void, i32, i32)
DEF_HELPER_1(inl, tl, i32)
DEF_HELPER_3(outb, void, env, i32, i32)
DEF_HELPER_2(inb, tl, env, i32)
DEF_HELPER_3(outw, void, env, i32, i32)
DEF_HELPER_2(inw, tl, env, i32)
DEF_HELPER_3(outl, void, env, i32, i32)
DEF_HELPER_2(inl, tl, env, i32)
DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
DEF_HELPER_3(vmexit, void, env, i32, i64)


@ -1,60 +0,0 @@
/*
* qemu user ioport functions
*
* Copyright (c) 2003-2008 Fabrice Bellard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include "qemu.h"
#include "qemu-common.h"
#include "exec/ioport.h"
void cpu_outb(pio_addr_t addr, uint8_t val)
{
fprintf(stderr, "outb: port=0x%04"FMT_pioaddr", data=%02"PRIx8"\n",
addr, val);
}
void cpu_outw(pio_addr_t addr, uint16_t val)
{
fprintf(stderr, "outw: port=0x%04"FMT_pioaddr", data=%04"PRIx16"\n",
addr, val);
}
void cpu_outl(pio_addr_t addr, uint32_t val)
{
fprintf(stderr, "outl: port=0x%04"FMT_pioaddr", data=%08"PRIx32"\n",
addr, val);
}
uint8_t cpu_inb(pio_addr_t addr)
{
fprintf(stderr, "inb: port=0x%04"FMT_pioaddr"\n", addr);
return 0;
}
uint16_t cpu_inw(pio_addr_t addr)
{
fprintf(stderr, "inw: port=0x%04"FMT_pioaddr"\n", addr);
return 0;
}
uint32_t cpu_inl(pio_addr_t addr)
{
fprintf(stderr, "inl: port=0x%04"FMT_pioaddr"\n", addr);
return 0;
}


@ -2259,7 +2259,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
}
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
return MEMTXATTRS_UNSPECIFIED;
return cpu_get_mem_attrs(env);
}
int kvm_arch_process_async_events(CPUState *cs)


@ -372,6 +372,9 @@ static int cpu_post_load(void *opaque, int version_id)
}
tlb_flush(cs, 1);
if (tcg_enabled()) {
cpu_smm_update(cpu);
}
return 0;
}


@ -18,38 +18,71 @@
*/
#include "cpu.h"
#include "exec/ioport.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
void helper_outb(uint32_t port, uint32_t data)
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
cpu_outb(port, data & 0xff);
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
#else
address_space_stb(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
#endif
}
target_ulong helper_inb(uint32_t port)
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
return cpu_inb(port);
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "inb: port=0x%04x\n", port);
return 0;
#else
return address_space_ldub(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
#endif
}
void helper_outw(uint32_t port, uint32_t data)
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
cpu_outw(port, data & 0xffff);
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
#else
address_space_stw(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
#endif
}
target_ulong helper_inw(uint32_t port)
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
return cpu_inw(port);
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "inw: port=0x%04x\n", port);
return 0;
#else
return address_space_lduw(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
#endif
}
void helper_outl(uint32_t port, uint32_t data)
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
cpu_outl(port, data);
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outw: port=0x%04x, data=%08x\n", port, data);
#else
address_space_stl(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
#endif
}
target_ulong helper_inl(uint32_t port)
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
return cpu_inl(port);
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "inl: port=0x%04x\n", port);
return 0;
#else
return address_space_ldl(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
#endif
}
void helper_into(CPUX86State *env, int next_eip_addend)


@ -1144,7 +1144,7 @@ static void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (!(event_inj & SVM_EVTINJ_VALID)) {
@ -1158,11 +1158,11 @@ static void handle_even_inj(CPUX86State *env, int intno, int is_int,
event_inj = intno | type | SVM_EVTINJ_VALID;
if (!rm && exception_has_error_code(intno)) {
event_inj |= SVM_EVTINJ_VALID_ERR;
stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj_err),
error_code);
}
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
event_inj);
}
@ -1240,11 +1240,11 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
#if !defined(CONFIG_USER_ONLY)
if (env->hflags & HF_SVMI_MASK) {
CPUState *cs = CPU(cpu);
uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.event_inj));
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
event_inj & ~SVM_EVTINJ_VALID);
}
@ -1339,7 +1339,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
int intno;
/* FIXME: this should respect TPR */
cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
intno = ldl_phys(cs->as, env->vm_vmcb
intno = x86_ldl_phys(cs, env->vm_vmcb
+ offsetof(struct vmcb, control.int_vector));
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing virtual hardware INT=0x%02x\n", intno);


@ -40,6 +40,16 @@ void helper_rsm(CPUX86State *env)
#define SMM_REVISION_ID 0x00020000
#endif
void cpu_smm_update(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
bool smm_enabled = (env->hflags & HF_SMM_MASK);
if (cpu->smram) {
memory_region_set_enabled(cpu->smram, smm_enabled);
}
}
void do_smm_enter(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@ -52,7 +62,12 @@ void do_smm_enter(X86CPU *cpu)
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
env->hflags |= HF_SMM_MASK;
cpu_smm_update(env);
if (env->hflags2 & HF2_NMI_MASK) {
env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
} else {
env->hflags2 |= HF2_NMI_MASK;
}
cpu_smm_update(cpu);
sm_state = env->smbase + 0x8000;
@ -60,83 +75,83 @@ void do_smm_enter(X86CPU *cpu)
for (i = 0; i < 6; i++) {
dt = &env->segs[i];
offset = 0x7e00 + i * 16;
stw_phys(cs->as, sm_state + offset, dt->selector);
stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
stl_phys(cs->as, sm_state + offset + 4, dt->limit);
stq_phys(cs->as, sm_state + offset + 8, dt->base);
x86_stw_phys(cs, sm_state + offset, dt->selector);
x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
x86_stq_phys(cs, sm_state + offset + 8, dt->base);
}
stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base);
stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit);
x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector);
stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base);
stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit);
stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
stq_phys(cs->as, sm_state + 0x7e88, env->idt.base);
stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit);
x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector);
stq_phys(cs->as, sm_state + 0x7e98, env->tr.base);
stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit);
stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
stq_phys(cs->as, sm_state + 0x7ed0, env->efer);
x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]);
stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]);
stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]);
stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]);
stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]);
stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]);
stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]);
stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]);
x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
for (i = 8; i < 16; i++) {
stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]);
x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
}
stq_phys(cs->as, sm_state + 0x7f78, env->eip);
stl_phys(cs->as, sm_state + 0x7f70, cpu_compute_eflags(env));
stl_phys(cs->as, sm_state + 0x7f68, env->dr[6]);
stl_phys(cs->as, sm_state + 0x7f60, env->dr[7]);
x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
stl_phys(cs->as, sm_state + 0x7f48, env->cr[4]);
stq_phys(cs->as, sm_state + 0x7f50, env->cr[3]);
stl_phys(cs->as, sm_state + 0x7f58, env->cr[0]);
x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
stl_phys(cs->as, sm_state + 0x7f00, env->smbase);
x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
#else
stl_phys(cs->as, sm_state + 0x7ffc, env->cr[0]);
stl_phys(cs->as, sm_state + 0x7ff8, env->cr[3]);
stl_phys(cs->as, sm_state + 0x7ff4, cpu_compute_eflags(env));
stl_phys(cs->as, sm_state + 0x7ff0, env->eip);
stl_phys(cs->as, sm_state + 0x7fec, env->regs[R_EDI]);
stl_phys(cs->as, sm_state + 0x7fe8, env->regs[R_ESI]);
stl_phys(cs->as, sm_state + 0x7fe4, env->regs[R_EBP]);
stl_phys(cs->as, sm_state + 0x7fe0, env->regs[R_ESP]);
stl_phys(cs->as, sm_state + 0x7fdc, env->regs[R_EBX]);
stl_phys(cs->as, sm_state + 0x7fd8, env->regs[R_EDX]);
stl_phys(cs->as, sm_state + 0x7fd4, env->regs[R_ECX]);
stl_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EAX]);
stl_phys(cs->as, sm_state + 0x7fcc, env->dr[6]);
stl_phys(cs->as, sm_state + 0x7fc8, env->dr[7]);
x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
stl_phys(cs->as, sm_state + 0x7fc4, env->tr.selector);
stl_phys(cs->as, sm_state + 0x7f64, env->tr.base);
stl_phys(cs->as, sm_state + 0x7f60, env->tr.limit);
stl_phys(cs->as, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
stl_phys(cs->as, sm_state + 0x7fc0, env->ldt.selector);
stl_phys(cs->as, sm_state + 0x7f80, env->ldt.base);
stl_phys(cs->as, sm_state + 0x7f7c, env->ldt.limit);
stl_phys(cs->as, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
stl_phys(cs->as, sm_state + 0x7f74, env->gdt.base);
stl_phys(cs->as, sm_state + 0x7f70, env->gdt.limit);
x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
stl_phys(cs->as, sm_state + 0x7f58, env->idt.base);
stl_phys(cs->as, sm_state + 0x7f54, env->idt.limit);
x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
for (i = 0; i < 6; i++) {
dt = &env->segs[i];
@ -145,15 +160,15 @@ void do_smm_enter(X86CPU *cpu)
} else {
offset = 0x7f2c + (i - 3) * 12;
}
stl_phys(cs->as, sm_state + 0x7fa8 + i * 4, dt->selector);
stl_phys(cs->as, sm_state + offset + 8, dt->base);
stl_phys(cs->as, sm_state + offset + 4, dt->limit);
stl_phys(cs->as, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
x86_stl_phys(cs, sm_state + offset + 8, dt->base);
x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
}
stl_phys(cs->as, sm_state + 0x7f14, env->cr[4]);
x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
stl_phys(cs->as, sm_state + 0x7ef8, env->smbase);
x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
#endif
/* init SMM cpu state */
@ -172,22 +187,22 @@ void do_smm_enter(X86CPU *cpu)
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
0xffffffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
DESC_G_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
DESC_G_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
DESC_G_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
DESC_G_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
DESC_G_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
DESC_G_MASK | DESC_A_MASK);
}
void helper_rsm(CPUX86State *env)
@ -200,91 +215,91 @@ void helper_rsm(CPUX86State *env)
sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));
cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);
env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70);
env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8;
env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);
env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90);
env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8;
env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
for (i = 8; i < 16; i++) {
env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
}
env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60);
env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48));
cpu_x86_update_cr3(env, ldq_phys(cs->as, sm_state + 0x7f50));
cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));
cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
for (i = 0; i < 6; i++) {
offset = 0x7e00 + i * 16;
cpu_x86_load_seg_cache(env, i,
lduw_phys(cs->as, sm_state + offset),
ldq_phys(cs->as, sm_state + offset + 8),
ldl_phys(cs->as, sm_state + offset + 4),
(lduw_phys(cs->as, sm_state + offset + 2) &
x86_lduw_phys(cs, sm_state + offset),
x86_ldq_phys(cs, sm_state + offset + 8),
x86_ldl_phys(cs, sm_state + offset + 4),
(x86_lduw_phys(cs, sm_state + offset + 2) &
0xf0ff) << 8);
}
val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00) & ~0x7fff;
}
#else
cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc));
cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8));
cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4),
cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
env->eip = ldl_phys(cs->as, sm_state + 0x7ff0);
env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec);
env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8);
env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4);
env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0);
env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc);
env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8);
env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4);
env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0);
env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc);
env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8);
env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff;
env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64);
env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60);
env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8;
env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff;
env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80);
env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c);
env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8;
env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74);
env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70);
env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58);
env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54);
env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
for (i = 0; i < 6; i++) {
if (i < 3) {
@@ -293,22 +308,26 @@ void helper_rsm(CPUX86State *env)
offset = 0x7f2c + (i - 3) * 12;
}
cpu_x86_load_seg_cache(env, i,
ldl_phys(cs->as,
x86_ldl_phys(cs,
sm_state + 0x7fa8 + i * 4) & 0xffff,
ldl_phys(cs->as, sm_state + offset + 8),
ldl_phys(cs->as, sm_state + offset + 4),
(ldl_phys(cs->as,
x86_ldl_phys(cs, sm_state + offset + 8),
x86_ldl_phys(cs, sm_state + offset + 4),
(x86_ldl_phys(cs,
sm_state + offset) & 0xf0ff) << 8);
}
cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14));
cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8) & ~0x7fff;
}
#endif
if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
env->hflags2 &= ~HF2_NMI_MASK;
}
env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
env->hflags &= ~HF_SMM_MASK;
cpu_smm_update(env);
cpu_smm_update(cpu);
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
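(All of the mechanical ldq_phys(cs->as, ...) -> x86_ldq_phys(cs, ...) rewrites above route SMM state loads and stores through the CPU's own address space, so per-CPU SMRAM mappings are honoured. A minimal sketch of what such a wrapper presumably looks like, shown for the 64-bit load; the real definitions live elsewhere in this series:

    /* Sketch only: load a quadword through this CPU's address space,
     * carrying memory attributes (e.g. the SMM bit) with the access. */
    uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
    {
        X86CPU *cpu = X86_CPU(cs);

        return address_space_ldq(cs->as, addr,
                                 cpu_get_mem_attrs(&cpu->env), NULL);
    }
)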


@@ -87,13 +87,13 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
{
CPUState *cs = CPU(x86_env_get_cpu(env));
stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
sc->selector);
stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
sc->base);
stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
sc->limit);
stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
@@ -103,11 +103,11 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
CPUState *cs = CPU(x86_env_get_cpu(env));
unsigned int flags;
sc->selector = lduw_phys(cs->as,
sc->selector = x86_lduw_phys(cs,
addr + offsetof(struct vmcb_seg, selector));
sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
@@ -141,32 +141,32 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->vm_vmcb = addr;
/* save the current CPU state in the hsave page */
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
@@ -179,30 +179,30 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
env->eip + next_eip_addend);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
/* load the interception bitmaps so we do not need to access the
vmcb in svm mode */
env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.intercept));
env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_cr_read));
env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_cr_write));
env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_dr_read));
env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_dr_write));
env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_exceptions
));
@@ -210,35 +210,35 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
/* enable intercepts */
env->hflags |= HF_SVMI_MASK;
env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
offsetof(struct vmcb, control.tsc_offset));
env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
save.gdtr.base));
env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
save.gdtr.limit));
env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.base));
env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.limit));
/* clear exit_info_2 so we behave like the real hardware */
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
cpu_x86_update_cr0(env, ldq_phys(cs->as,
cpu_x86_update_cr0(env, x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
save.cr0)));
cpu_x86_update_cr4(env, ldq_phys(cs->as,
cpu_x86_update_cr4(env, x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
save.cr4)));
cpu_x86_update_cr3(env, ldq_phys(cs->as,
cpu_x86_update_cr3(env, x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
save.cr3)));
env->cr[2] = ldq_phys(cs->as,
env->cr[2] = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.cr2));
int_ctl = ldl_phys(cs->as,
int_ctl = x86_ldl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
if (int_ctl & V_INTR_MASKING_MASK) {
@@ -250,10 +250,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
}
cpu_load_efer(env,
ldq_phys(cs->as,
x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.efer)));
env->eflags = 0;
cpu_load_eflags(env, ldq_phys(cs->as,
cpu_load_eflags(env, x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
@@ -267,21 +267,21 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
R_DS);
env->eip = ldq_phys(cs->as,
env->eip = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rip));
env->regs[R_ESP] = ldq_phys(cs->as,
env->regs[R_ESP] = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rsp));
env->regs[R_EAX] = ldq_phys(cs->as,
env->regs[R_EAX] = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rax));
env->dr[7] = ldq_phys(cs->as,
env->dr[7] = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.dr7));
env->dr[6] = ldq_phys(cs->as,
env->dr[6] = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.dr6));
/* FIXME: guest state consistency checks */
switch (ldub_phys(cs->as,
switch (x86_ldub_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
case TLB_CONTROL_DO_NOTHING:
break;
@@ -300,12 +300,12 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
}
/* maybe we need to inject an event */
event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (event_inj & SVM_EVTINJ_VALID) {
uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.event_inj_err));
@@ -372,7 +372,7 @@ void helper_vmload(CPUX86State *env, int aflag)
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
save.fs.base)),
env->segs[R_FS].base);
@@ -382,18 +382,18 @@ void helper_vmload(CPUX86State *env, int aflag)
svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
#ifdef TARGET_X86_64
env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
save.kernel_gs_base));
env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
env->sysenter_cs = ldq_phys(cs->as,
env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
env->sysenter_cs = x86_ldq_phys(cs,
addr + offsetof(struct vmcb, save.sysenter_cs));
env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
save.sysenter_esp));
env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
save.sysenter_eip));
}
@@ -412,7 +412,7 @@ void helper_vmsave(CPUX86State *env, int aflag)
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
addr, ldq_phys(cs->as,
addr, x86_ldq_phys(cs,
addr + offsetof(struct vmcb, save.fs.base)),
env->segs[R_FS].base);
@@ -426,18 +426,18 @@ void helper_vmsave(CPUX86State *env, int aflag)
&env->ldt);
#ifdef TARGET_X86_64
stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
env->kernelgsbase);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
stq_phys(cs->as,
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
x86_stq_phys(cs,
addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
env->sysenter_esp);
stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
env->sysenter_eip);
}
@@ -515,7 +515,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
case SVM_EXIT_MSR:
if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */
uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
offsetof(struct vmcb,
control.msrpm_base_pa));
uint32_t t0, t1;
@@ -541,7 +541,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
t1 = 0;
break;
}
if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
helper_vmexit(env, type, param);
}
}
@@ -567,13 +567,13 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */
uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
offsetof(struct vmcb, control.iopm_base_pa));
uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
/* next env->eip */
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
env->eip + next_eip_addend);
helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
@@ -590,17 +590,17 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
PRIx64 ", " TARGET_FMT_lx ")!\n",
exit_code, exit_info_1,
ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.exit_info_2)),
env->eip);
if (env->hflags & HF_INHIBIT_IRQ_MASK) {
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.int_state),
SVM_INTERRUPT_SHADOW_MASK);
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
} else {
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
}
@@ -614,50 +614,50 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
int_ctl = ldl_phys(cs->as,
int_ctl = x86_ldl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
int_ctl |= env->v_tpr & V_TPR_MASK;
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
int_ctl |= V_IRQ_MASK;
}
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
env->eip);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(cs->as,
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
env->hflags & HF_CPL_MASK);
/* Reload the host state from vm_hsave */
@@ -668,32 +668,32 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
env->tsc_offset = 0;
env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
save.gdtr.base));
env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
save.gdtr.limit));
env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
save.idtr.base));
env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
save.idtr.limit));
cpu_x86_update_cr0(env, ldq_phys(cs->as,
cpu_x86_update_cr0(env, x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb,
save.cr0)) |
CR0_PE_MASK);
cpu_x86_update_cr4(env, ldq_phys(cs->as,
cpu_x86_update_cr4(env, x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb,
save.cr4)));
cpu_x86_update_cr3(env, ldq_phys(cs->as,
cpu_x86_update_cr3(env, x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb,
save.cr3)));
/* we need to set the efer after the crs so the hidden flags get
set properly */
cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
save.efer)));
env->eflags = 0;
cpu_load_eflags(env, ldq_phys(cs->as,
cpu_load_eflags(env, x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb,
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
@@ -708,33 +708,33 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
R_DS);
env->eip = ldq_phys(cs->as,
env->eip = x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.rip));
env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
offsetof(struct vmcb, save.rsp));
env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
offsetof(struct vmcb, save.rax));
env->dr[6] = ldq_phys(cs->as,
env->dr[6] = x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.dr6));
env->dr[7] = ldq_phys(cs->as,
env->dr[7] = x86_ldq_phys(cs,
env->vm_hsave + offsetof(struct vmcb, save.dr7));
/* other setups */
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
exit_code);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
exit_info_1);
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj)));
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj_err)));
stl_phys(cs->as,
x86_stl_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
env->hflags2 &= ~HF2_GIF_MASK;
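(Aside: svm_save_seg() and svm_load_seg() above pack QEMU's segment flags into the VMCB 12-bit attrib format and back. A small round-trip check with illustrative values, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* attrib keeps flags bits 8..15 in bits 0..7 and flags bits 20..23
     * in bits 8..11; the load path reverses the packing. */
    static void attrib_roundtrip_demo(void)
    {
        uint32_t flags  = 0x00a09b00;
        uint16_t attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
        uint32_t back   = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
        assert(back == (flags & 0x00f0ff00)); /* bits outside the mask drop */
    }
)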


@@ -631,13 +631,13 @@ static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
switch (ot) {
case MO_8:
gen_helper_inb(v, n);
gen_helper_inb(v, cpu_env, n);
break;
case MO_16:
gen_helper_inw(v, n);
gen_helper_inw(v, cpu_env, n);
break;
case MO_32:
gen_helper_inl(v, n);
gen_helper_inl(v, cpu_env, n);
break;
default:
tcg_abort();
@@ -648,13 +648,13 @@ static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
switch (ot) {
case MO_8:
gen_helper_outb(v, n);
gen_helper_outb(cpu_env, v, n);
break;
case MO_16:
gen_helper_outw(v, n);
gen_helper_outw(cpu_env, v, n);
break;
case MO_32:
gen_helper_outl(v, n);
gen_helper_outl(cpu_env, v, n);
break;
default:
tcg_abort();
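(The new cpu_env operand means the generated in/out helpers now receive the CPU state, presumably so port I/O can carry per-CPU memory attributes just like the SMRAM accesses above. A hedged sketch of a matching C helper, assuming the address_space_io/cpu_get_mem_attrs pattern used elsewhere in the series:

    /* Sketch, not necessarily the literal helper body. */
    target_ulong helper_inb(CPUX86State *env, uint32_t port)
    {
        return address_space_ldub(&address_space_io, port,
                                  cpu_get_mem_attrs(env), NULL);
    }
)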


@@ -174,6 +174,8 @@ gcov-files-i386-y += hw/usb/dev-storage.c
check-qtest-i386-y += tests/usb-hcd-xhci-test$(EXESUF)
gcov-files-i386-y += hw/usb/hcd-xhci.c
check-qtest-i386-y += tests/pc-cpu-test$(EXESUF)
check-qtest-i386-y += tests/q35-test$(EXESUF)
gcov-files-i386-y += hw/pci-host/q35.c
check-qtest-i386-$(CONFIG_LINUX) += tests/vhost-user-test$(EXESUF)
check-qtest-x86_64-y = $(check-qtest-i386-y)
gcov-files-i386-y += i386-softmmu/hw/timer/mc146818rtc.c
@@ -355,6 +357,7 @@ tests/boot-order-test$(EXESUF): tests/boot-order-test.o $(libqos-obj-y)
tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o $(libqos-obj-y)
tests/tmp105-test$(EXESUF): tests/tmp105-test.o $(libqos-omap-obj-y)
tests/i440fx-test$(EXESUF): tests/i440fx-test.o $(libqos-pc-obj-y)
tests/q35-test$(EXESUF): tests/q35-test.o $(libqos-pc-obj-y)
tests/fw_cfg-test$(EXESUF): tests/fw_cfg-test.o $(libqos-pc-obj-y)
tests/e1000-test$(EXESUF): tests/e1000-test.o
tests/rtl8139-test$(EXESUF): tests/rtl8139-test.o $(libqos-pc-obj-y)

tests/q35-test.c (new file, 91 lines)

@@ -0,0 +1,91 @@
/*
* QTest testcase for Q35 northbridge
*
* Copyright (c) 2015 Red Hat, Inc.
*
* Author: Gerd Hoffmann <kraxel@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include <glib.h>
#include <string.h>
#include "libqtest.h"
#include "libqos/pci.h"
#include "libqos/pci-pc.h"
#include "qemu/osdep.h"
#include "hw/pci-host/q35.h"
static void smram_set_bit(QPCIDevice *pcidev, uint8_t mask, bool enabled)
{
uint8_t smram;
smram = qpci_config_readb(pcidev, MCH_HOST_BRIDGE_SMRAM);
if (enabled) {
smram |= mask;
} else {
smram &= ~mask;
}
qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_SMRAM, smram);
}
static bool smram_test_bit(QPCIDevice *pcidev, uint8_t mask)
{
uint8_t smram;
smram = qpci_config_readb(pcidev, MCH_HOST_BRIDGE_SMRAM);
return smram & mask;
}
static void test_smram_lock(void)
{
QPCIBus *pcibus;
QPCIDevice *pcidev;
QDict *response;
pcibus = qpci_init_pc();
g_assert(pcibus != NULL);
pcidev = qpci_device_find(pcibus, 0);
g_assert(pcidev != NULL);
/* check open is settable */
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, false);
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, true);
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == true);
/* lock, check open is cleared & not settable */
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_LCK, true);
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, true);
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
/* reset */
response = qmp("{'execute': 'system_reset', 'arguments': {} }");
g_assert(response);
g_assert(!qdict_haskey(response, "error"));
QDECREF(response);
/* check open is settable again */
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, false);
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, true);
g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == true);
}
int main(int argc, char **argv)
{
int ret;
g_test_init(&argc, &argv, NULL);
qtest_add_func("/q35/smram/lock", test_smram_lock);
qtest_start("-M q35");
ret = g_test_run();
qtest_end();
return ret;
}
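(For reference, the SMRAM bits the test flips are config-space bits on the Q35 host bridge. A sketch of the layout with hypothetical values; the authoritative definitions are in hw/pci-host/q35.h:

    #define MCH_HOST_BRIDGE_SMRAM           0x9d      /* config offset */
    #define MCH_HOST_BRIDGE_SMRAM_D_OPEN    (1 << 6)  /* SMRAM open */
    #define MCH_HOST_BRIDGE_SMRAM_D_CLS     (1 << 5)  /* SMRAM closed */
    #define MCH_HOST_BRIDGE_SMRAM_D_LCK     (1 << 4)  /* locked until reset */
    #define MCH_HOST_BRIDGE_SMRAM_G_SMRAME  (1 << 3)  /* global enable */
)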


@@ -1042,11 +1042,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
while (start < end) {
tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
tb_invalidate_phys_page_range(start, end, 0);
start &= TARGET_PAGE_MASK;
start += TARGET_PAGE_SIZE;
}
@@ -1083,12 +1082,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
if (!p) {
return;
}
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
is_cpu_write_access) {
/* build code bitmap */
build_page_bitmap(p);
}
#if defined(TARGET_HAS_PRECISE_SMC)
if (cpu != NULL) {
env = cpu->env_ptr;
@@ -1158,9 +1151,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
/* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) {
invalidate_page_bitmap(p);
if (is_cpu_write_access) {
tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
}
tlb_unprotect_code(start);
}
#endif
#ifdef TARGET_HAS_PRECISE_SMC
@@ -1193,6 +1184,11 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
if (!p) {
return;
}
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
/* build code bitmap */
build_page_bitmap(p);
}
if (p->code_bitmap) {
unsigned int nr;
unsigned long b;


@@ -21,6 +21,13 @@
/* translate-all.c */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_check_watchpoint(CPUState *cpu);
#ifdef CONFIG_USER_ONLY
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
#endif
#endif /* TRANSLATE_ALL_H */


@@ -1619,67 +1619,6 @@ bool dpy_cursor_define_supported(QemuConsole *con)
return false;
}
/*
* Call dpy_gfx_update for all dirty scanlines. Works for
* DisplaySurfaces backed by guest memory (i.e. the ones created
* using qemu_create_displaysurface_guestmem).
*/
void dpy_gfx_update_dirty(QemuConsole *con,
MemoryRegion *address_space,
hwaddr base,
bool invalidate)
{
DisplaySurface *ds = qemu_console_surface(con);
int width = surface_stride(ds);
int height = surface_height(ds);
hwaddr size = width * height;
MemoryRegionSection mem_section;
MemoryRegion *mem;
ram_addr_t addr;
int first, last, i;
bool dirty;
mem_section = memory_region_find(address_space, base, size);
mem = mem_section.mr;
if (int128_get64(mem_section.size) != size ||
!memory_region_is_ram(mem_section.mr)) {
goto out;
}
assert(mem);
memory_region_sync_dirty_bitmap(mem);
addr = mem_section.offset_within_region;
first = -1;
last = -1;
for (i = 0; i < height; i++, addr += width) {
dirty = invalidate ||
memory_region_get_dirty(mem, addr, width, DIRTY_MEMORY_VGA);
if (dirty) {
if (first == -1) {
first = i;
}
last = i;
}
if (first != -1 && !dirty) {
assert(last != -1 && last >= first);
dpy_gfx_update(con, 0, first, surface_width(ds),
last - first + 1);
first = -1;
}
}
if (first != -1) {
assert(last != -1 && last >= first);
dpy_gfx_update(con, 0, first, surface_width(ds),
last - first + 1);
}
memory_region_reset_dirty(mem, mem_section.offset_within_region, size,
DIRTY_MEMORY_VGA);
out:
memory_region_unref(mem);
}
/***********************************************************/
/* register display */


@@ -22,6 +22,7 @@
#include "tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "translate-all.h"
#undef EAX
#undef ECX


@@ -11,6 +11,7 @@
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/atomic.h"
/*
* bitmaps provide an array of bits, implemented using an
@@ -177,6 +178,43 @@ void bitmap_set(unsigned long *map, long start, long nr)
}
}
void bitmap_set_atomic(unsigned long *map, long start, long nr)
{
unsigned long *p = map + BIT_WORD(start);
const long size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
/* First word */
if (nr - bits_to_set > 0) {
atomic_or(p, mask_to_set);
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
/* Full words */
if (bits_to_set == BITS_PER_LONG) {
while (nr >= BITS_PER_LONG) {
*p = ~0UL;
nr -= BITS_PER_LONG;
p++;
}
}
/* Last word */
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
atomic_or(p, mask_to_set);
} else {
/* If we avoided the full barrier in atomic_or(), issue a
* barrier to account for the assignments in the while loop.
*/
smp_mb();
}
}
void bitmap_clear(unsigned long *map, long start, long nr)
{
unsigned long *p = map + BIT_WORD(start);
@@ -197,6 +235,51 @@ void bitmap_clear(unsigned long *map, long start, long nr)
}
}
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
{
unsigned long *p = map + BIT_WORD(start);
const long size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
unsigned long dirty = 0;
unsigned long old_bits;
/* First word */
if (nr - bits_to_clear > 0) {
old_bits = atomic_fetch_and(p, ~mask_to_clear);
dirty |= old_bits & mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
/* Full words */
if (bits_to_clear == BITS_PER_LONG) {
while (nr >= BITS_PER_LONG) {
if (*p) {
old_bits = atomic_xchg(p, 0);
dirty |= old_bits;
}
nr -= BITS_PER_LONG;
p++;
}
}
/* Last word */
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
old_bits = atomic_fetch_and(p, ~mask_to_clear);
dirty |= old_bits & mask_to_clear;
} else {
if (!dirty) {
smp_mb();
}
}
return dirty != 0;
}
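(Taken together, the two atomic helpers give the migration code a simple producer/consumer protocol over the dirty bitmap. A minimal usage sketch, with sizes and names illustrative only:

    /* One thread marks pages dirty; another harvests and clears them. */
    unsigned long dirty[BITS_TO_LONGS(4096)];

    bitmap_set_atomic(dirty, page_nr, 1);             /* writer side */

    if (bitmap_test_and_clear_atomic(dirty, 0, 4096)) {
        /* reader side: at least one page was dirty; bits are now clear */
    }
)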
#define ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
/**

vl.c (9 lines changed)

@@ -468,6 +468,9 @@ static QemuOptsList qemu_icount_opts = {
}, {
.name = "align",
.type = QEMU_OPT_BOOL,
}, {
.name = "sleep",
.type = QEMU_OPT_BOOL,
},
{ /* end of list */ }
},
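(This registers a new "sleep" boolean suboption for -icount, so presumably something like -icount shift=7,sleep=off is now accepted; the option name comes from the hunk above, while its exact semantics are defined by the icount code, which is not part of this excerpt.)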
@@ -2497,14 +2500,20 @@ static void qemu_run_exit_notifiers(void)
notifier_list_notify(&exit_notifiers, NULL);
}
static bool machine_init_done;
void qemu_add_machine_init_done_notifier(Notifier *notify)
{
notifier_list_add(&machine_init_done_notifiers, notify);
if (machine_init_done) {
notify->notify(notify, NULL);
}
}
static void qemu_run_machine_init_done_notifiers(void)
{
notifier_list_notify(&machine_init_done_notifiers, NULL);
machine_init_done = true;
}
static const QEMUOption *lookup_opt(int argc, char **argv,
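(With the machine_init_done flag above, a notifier registered after machine init has finished now fires immediately instead of never. A minimal caller sketch, names hypothetical:

    static void my_init_done_cb(Notifier *n, void *opaque)
    {
        /* runs right away if machine init already completed */
    }
    static Notifier my_init_done_notifier = { .notify = my_init_done_cb };

    qemu_add_machine_init_done_notifier(&my_init_done_notifier);
)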


@@ -488,7 +488,7 @@ static void xen_set_memory(struct MemoryListener *listener,
XenIOState *state = container_of(listener, XenIOState, memory_listener);
hwaddr start_addr = section->offset_within_address_space;
ram_addr_t size = int128_get64(section->size);
bool log_dirty = memory_region_is_logging(section->mr);
bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
hvmmem_type_t mem_type;
if (section->mr == &ram_memory) {
@@ -646,21 +646,27 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
}
static void xen_log_start(MemoryListener *listener,
MemoryRegionSection *section)
MemoryRegionSection *section,
int old, int new)
{
XenIOState *state = container_of(listener, XenIOState, memory_listener);
xen_sync_dirty_bitmap(state, section->offset_within_address_space,
int128_get64(section->size));
if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
xen_sync_dirty_bitmap(state, section->offset_within_address_space,
int128_get64(section->size));
}
}
static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
int old, int new)
{
XenIOState *state = container_of(listener, XenIOState, memory_listener);
state->log_for_dirtybit = NULL;
/* Disable dirty bit tracking */
xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
state->log_for_dirtybit = NULL;
/* Disable dirty bit tracking */
xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}
}
static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)