kqemu API change - allow use of kqemu with 32 bit QEMU on a 64 bit host
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4628 c046a42c-6fe2-441c-8c8c-71466251a162
parent da94d26390
commit da260249a4

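The commit title is the whole rationale, so a short illustration may help before the hunks: the new kqemu ABI (see the kqemu.h changes below) replaces unsigned long fields with fixed-width uint64_t and follows each pointer member with an explicit 32-bit pad on i386, so the structures passed through the ioctls have the same layout whether QEMU is built as a 32-bit or a 64-bit binary, which is what lets a 32-bit QEMU drive the 64-bit kernel module. A minimal, self-contained sketch of that padding idea (the struct and field names are made up for illustration; this is not the kqemu ABI):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* On an i386 build a pointer is 4 bytes, so an explicit 4-byte pad keeps the
 * following 64-bit field at the same offset it has on an x86_64 build. */
#if defined(__i386__)
#define PAD32(x) x
#else
#define PAD32(x)
#endif

struct shared_with_kernel {     /* illustrative layout, not the kqemu ABI */
    uint8_t *buffer;            /* 4 bytes on i386, 8 bytes on x86_64 */
    PAD32(uint32_t pad0;)       /* brings the next member to offset 8 */
    uint64_t buffer_size;       /* fixed width under both ABIs */
};

int main(void)
{
    /* Prints 8 whether this is compiled with -m32 or -m64, which is what
     * lets a 32-bit user process hand the struct to a 64-bit kernel module. */
    printf("buffer_size offset: %zu\n",
           offsetof(struct shared_with_kernel, buffer_size));
    return 0;
}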

configure: 2 changed lines

@@ -1222,7 +1222,7 @@ case "$target_cpu" in
   echo "TARGET_ARCH=i386" >> $config_mak
   echo "#define TARGET_ARCH \"i386\"" >> $config_h
   echo "#define TARGET_I386 1" >> $config_h
-  if test $kqemu = "yes" -a "$target_softmmu" = "yes" -a $cpu = "i386"
+  if test $kqemu = "yes" -a "$target_softmmu" = "yes"
   then
     echo "#define USE_KQEMU 1" >> $config_h
   fi

exec-all.h

@@ -563,15 +563,21 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
 #ifdef USE_KQEMU
 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
 
+#define MSR_QPI_COMMBASE 0xfabe0010
+
 int kqemu_init(CPUState *env);
 int kqemu_cpu_exec(CPUState *env);
 void kqemu_flush_page(CPUState *env, target_ulong addr);
 void kqemu_flush(CPUState *env, int global);
 void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
 void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
+void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
+                        ram_addr_t phys_offset);
 void kqemu_cpu_interrupt(CPUState *env);
 void kqemu_record_dump(void);
 
+extern uint32_t kqemu_comm_base;
+
 static inline int kqemu_is_ok(CPUState *env)
 {
     return(env->kqemu_enabled &&

exec.c: 7 changed lines

@@ -2139,6 +2139,13 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
     ram_addr_t orig_size = size;
     void *subpage;
 
+#ifdef USE_KQEMU
+    /* XXX: should not depend on cpu context */
+    env = first_cpu;
+    if (env->kqemu_enabled) {
+        kqemu_set_phys_mem(start_addr, size, phys_offset);
+    }
+#endif
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {

kqemu.c: 255 changed lines

@@ -1,7 +1,7 @@
 /*
  * KQEMU support
  *
- * Copyright (c) 2005 Fabrice Bellard
+ * Copyright (c) 2005-2008 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -51,24 +51,14 @@
 #include <fcntl.h>
 #include "kqemu.h"
 
-/* compatibility stuff */
-#ifndef KQEMU_RET_SYSCALL
-#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
-#endif
-#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
-#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
-#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
-#endif
-#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
-#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
-#endif
-
 #ifdef _WIN32
 #define KQEMU_DEVICE "\\\\.\\kqemu"
 #else
 #define KQEMU_DEVICE "/dev/kqemu"
 #endif
 
+static void qpi_init(void);
+
 #ifdef _WIN32
 #define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
 HANDLE kqemu_fd = KQEMU_INVALID_FD;
@@ -84,14 +74,15 @@ int kqemu_fd = KQEMU_INVALID_FD;
    2 = kernel kqemu
 */
 int kqemu_allowed = 1;
-unsigned long *pages_to_flush;
+uint64_t *pages_to_flush;
 unsigned int nb_pages_to_flush;
-unsigned long *ram_pages_to_update;
+uint64_t *ram_pages_to_update;
 unsigned int nb_ram_pages_to_update;
-unsigned long *modified_ram_pages;
+uint64_t *modified_ram_pages;
 unsigned int nb_modified_ram_pages;
 uint8_t *modified_ram_pages_table;
-extern uint32_t **l1_phys_map;
+int qpi_io_memory;
+uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
 
 #define cpuid(index, eax, ebx, ecx, edx) \
     asm volatile ("cpuid" \
@@ -161,7 +152,7 @@ static void kqemu_update_cpuid(CPUState *env)
 
 int kqemu_init(CPUState *env)
 {
-    struct kqemu_init init;
+    struct kqemu_init kinit;
     int ret, version;
 #ifdef _WIN32
     DWORD temp;
@@ -197,39 +188,35 @@ int kqemu_init(CPUState *env)
     }
 
     pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
-                                  sizeof(unsigned long));
+                                  sizeof(uint64_t));
     if (!pages_to_flush)
         goto fail;
 
     ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
-                                       sizeof(unsigned long));
+                                       sizeof(uint64_t));
     if (!ram_pages_to_update)
         goto fail;
 
     modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
-                                      sizeof(unsigned long));
+                                      sizeof(uint64_t));
     if (!modified_ram_pages)
         goto fail;
     modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
     if (!modified_ram_pages_table)
         goto fail;
 
-    init.ram_base = phys_ram_base;
-    init.ram_size = phys_ram_size;
-    init.ram_dirty = phys_ram_dirty;
-    init.phys_to_ram_map = l1_phys_map;
-    init.pages_to_flush = pages_to_flush;
-#if KQEMU_VERSION >= 0x010200
-    init.ram_pages_to_update = ram_pages_to_update;
-#endif
-#if KQEMU_VERSION >= 0x010300
-    init.modified_ram_pages = modified_ram_pages;
-#endif
+    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
+    kinit.ram_base = phys_ram_base;
+    kinit.ram_size = phys_ram_size;
+    kinit.ram_dirty = phys_ram_dirty;
+    kinit.pages_to_flush = pages_to_flush;
+    kinit.ram_pages_to_update = ram_pages_to_update;
+    kinit.modified_ram_pages = modified_ram_pages;
 #ifdef _WIN32
-    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
+    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                           NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
 #else
-    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
+    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
 #endif
     if (ret < 0) {
         fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
@@ -242,6 +229,8 @@ int kqemu_init(CPUState *env)
     env->kqemu_enabled = kqemu_allowed;
     nb_pages_to_flush = 0;
     nb_ram_pages_to_update = 0;
+
+    qpi_init();
     return 0;
 }
 
@@ -272,7 +261,8 @@ void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
 {
 #ifdef DEBUG
     if (loglevel & CPU_LOG_INT) {
-        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
+        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n",
+                (unsigned long)ram_addr);
     }
 #endif
     /* we only track transitions to dirty state */
@@ -327,6 +317,51 @@ void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
     }
 }
 
+void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
+                        ram_addr_t phys_offset)
+{
+    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
+    uint64_t end;
+    int ret, io_index;
+
+    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
+    start_addr &= TARGET_PAGE_MASK;
+    kphys_mem->phys_addr = start_addr;
+    kphys_mem->size = end - start_addr;
+    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
+    io_index = phys_offset & ~TARGET_PAGE_MASK;
+    switch(io_index) {
+    case IO_MEM_RAM:
+        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
+        break;
+    case IO_MEM_ROM:
+        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
+        break;
+    default:
+        if (qpi_io_memory == io_index) {
+            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
+        } else {
+            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
+        }
+        break;
+    }
+#ifdef _WIN32
+    {
+        DWORD temp;
+        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
+                              kphys_mem, sizeof(*kphys_mem),
+                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
+    }
+#else
+    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
+#endif
+    if (ret < 0) {
+        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64 " size=0x%08lx phys_offset=0x%08lx\n",
+                ret, start_addr,
+                (unsigned long)size, (unsigned long)phys_offset);
+    }
+}
 
 struct fpstate {
     uint16_t fpuc;
     uint16_t dummy1;
@@ -474,7 +509,7 @@ static int do_syscall(CPUState *env,
     int selector;
 
     selector = (env->star >> 32) & 0xffff;
-#ifdef __x86_64__
+#ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
         int code64;
 
@@ -631,6 +666,24 @@ void kqemu_record_dump(void)
 }
 #endif
 
+static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
+                                  const SegmentCache *sc)
+{
+    ksc->selector = sc->selector;
+    ksc->flags = sc->flags;
+    ksc->limit = sc->limit;
+    ksc->base = sc->base;
+}
+
+static inline void kqemu_save_seg(SegmentCache *sc,
+                                  const struct kqemu_segment_cache *ksc)
+{
+    sc->selector = ksc->selector;
+    sc->flags = ksc->flags;
+    sc->limit = ksc->limit;
+    sc->base = ksc->base;
+}
+
 int kqemu_cpu_exec(CPUState *env)
 {
     struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
@@ -638,7 +691,6 @@ int kqemu_cpu_exec(CPUState *env)
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
-
 #ifdef _WIN32
     DWORD temp;
 #endif
@@ -652,34 +704,32 @@ int kqemu_cpu_exec(CPUState *env)
         cpu_dump_state(env, logfile, fprintf, 0);
     }
 #endif
-    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
+    for(i = 0; i < CPU_NB_REGS; i++)
+        kenv->regs[i] = env->regs[i];
     kenv->eip = env->eip;
     kenv->eflags = env->eflags;
-    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
-    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
-    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
-    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
-    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
+    for(i = 0; i < 6; i++)
+        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
+    kqemu_load_seg(&kenv->ldt, &env->ldt);
+    kqemu_load_seg(&kenv->tr, &env->tr);
+    kqemu_load_seg(&kenv->gdt, &env->gdt);
+    kqemu_load_seg(&kenv->idt, &env->idt);
     kenv->cr0 = env->cr[0];
     kenv->cr2 = env->cr[2];
     kenv->cr3 = env->cr[3];
     kenv->cr4 = env->cr[4];
     kenv->a20_mask = env->a20_mask;
-#if KQEMU_VERSION >= 0x010100
     kenv->efer = env->efer;
-#endif
-#if KQEMU_VERSION >= 0x010300
     kenv->tsc_offset = 0;
     kenv->star = env->star;
     kenv->sysenter_cs = env->sysenter_cs;
     kenv->sysenter_esp = env->sysenter_esp;
     kenv->sysenter_eip = env->sysenter_eip;
-#ifdef __x86_64__
+#ifdef TARGET_X86_64
     kenv->lstar = env->lstar;
     kenv->cstar = env->cstar;
     kenv->fmask = env->fmask;
     kenv->kernelgsbase = env->kernelgsbase;
 #endif
-#endif
     if (env->dr[7] & 0xff) {
         kenv->dr7 = env->dr[7];
@@ -694,15 +744,11 @@ int kqemu_cpu_exec(CPUState *env)
     cpl = (env->hflags & HF_CPL_MASK);
     kenv->cpl = cpl;
     kenv->nb_pages_to_flush = nb_pages_to_flush;
-#if KQEMU_VERSION >= 0x010200
     kenv->user_only = (env->kqemu_enabled == 1);
     kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
-#endif
     nb_ram_pages_to_update = 0;
-
-#if KQEMU_VERSION >= 0x010300
     kenv->nb_modified_ram_pages = nb_modified_ram_pages;
-#endif
-
     kqemu_reset_modified_ram_pages();
 
     if (env->cpuid_features & CPUID_FXSR)
@@ -720,40 +766,29 @@ int kqemu_cpu_exec(CPUState *env)
         ret = -1;
     }
 #else
-#if KQEMU_VERSION >= 0x010100
     ioctl(kqemu_fd, KQEMU_EXEC, kenv);
     ret = kenv->retval;
-#else
-    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
-#endif
 #endif
     if (env->cpuid_features & CPUID_FXSR)
         save_native_fp_fxsave(env);
     else
         save_native_fp_fsave(env);
 
-    memcpy(env->regs, kenv->regs, sizeof(env->regs));
+    for(i = 0; i < CPU_NB_REGS; i++)
+        env->regs[i] = kenv->regs[i];
     env->eip = kenv->eip;
     env->eflags = kenv->eflags;
-    memcpy(env->segs, kenv->segs, sizeof(env->segs));
+    for(i = 0; i < 6; i++)
+        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
     cpu_x86_set_cpl(env, kenv->cpl);
-    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
-#if 0
-    /* no need to restore that */
-    memcpy(env->tr, kenv->tr, sizeof(env->tr));
-    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
-    memcpy(env->idt, kenv->idt, sizeof(env->idt));
-    env->a20_mask = kenv->a20_mask;
-#endif
+    kqemu_save_seg(&env->ldt, &kenv->ldt);
     env->cr[0] = kenv->cr0;
     env->cr[4] = kenv->cr4;
     env->cr[3] = kenv->cr3;
     env->cr[2] = kenv->cr2;
     env->dr[6] = kenv->dr6;
-#if KQEMU_VERSION >= 0x010300
-#ifdef __x86_64__
+#ifdef TARGET_X86_64
     env->kernelgsbase = kenv->kernelgsbase;
 #endif
-#endif
 
     /* flush pages as indicated by kqemu */
@@ -771,13 +806,10 @@ int kqemu_cpu_exec(CPUState *env)
     kqemu_exec_count++;
 #endif
 
-#if KQEMU_VERSION >= 0x010200
     if (kenv->nb_ram_pages_to_update > 0) {
         cpu_tlb_update_dirty(env);
     }
-#endif
 
-#if KQEMU_VERSION >= 0x010300
     if (kenv->nb_modified_ram_pages > 0) {
         for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
             unsigned long addr;
@@ -785,7 +817,6 @@ int kqemu_cpu_exec(CPUState *env)
             tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
         }
     }
-#endif
 
     /* restore the hidden flags */
     {
@@ -905,11 +936,85 @@ int kqemu_cpu_exec(CPUState *env)
 
 void kqemu_cpu_interrupt(CPUState *env)
 {
-#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
+#if defined(_WIN32)
     /* cancelling the I/O request causes KQEMU to finish executing the
        current block and successfully returning. */
     CancelIo(kqemu_fd);
 #endif
 }
 
+/*
+   QEMU paravirtualization interface. The current interface only
+   allows to modify the IF and IOPL flags when running in
+   kqemu.
+
+   At this point it is not very satisfactory. I leave it for reference
+   as it adds little complexity.
+*/
+
+#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
+
+static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr)
+{
+    return 0;
+}
+
+static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
+{
+    return 0;
+}
+
+static void qpi_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+}
+
+static void qpi_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+}
+
+static uint32_t qpi_mem_readl(void *opaque, target_phys_addr_t addr)
+{
+    CPUState *env;
+
+    env = cpu_single_env;
+    if (!env)
+        return 0;
+    return env->eflags & (IF_MASK | IOPL_MASK);
+}
+
+/* Note: after writing to this address, the guest code must make sure
+   it is exiting the current TB. pushf/popf can be used for that
+   purpose. */
+static void qpi_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+    CPUState *env;
+
+    env = cpu_single_env;
+    if (!env)
+        return;
+    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
+        (val & (IF_MASK | IOPL_MASK));
+}
+
+static CPUReadMemoryFunc *qpi_mem_read[3] = {
+    qpi_mem_readb,
+    qpi_mem_readw,
+    qpi_mem_readl,
+};
+
+static CPUWriteMemoryFunc *qpi_mem_write[3] = {
+    qpi_mem_writeb,
+    qpi_mem_writew,
+    qpi_mem_writel,
+};
+
+static void qpi_init(void)
+{
+    kqemu_comm_base = 0xff000000 | 1;
+    qpi_io_memory = cpu_register_io_memory(0,
+                                           qpi_mem_read,
+                                           qpi_mem_write, NULL);
+    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
+                                 0x1000, qpi_io_memory);
+}
 #endif
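
The comment block in the new QPI code above says what the interface does but not how a guest would reach it: qpi_init() registers a one-page I/O region at kqemu_comm_base (0xff000000, with the low bit apparently used as a presence flag), and the helper_rdmsr hunk further down returns that value for MSR 0xfabe0010 when kqemu is enabled. A hypothetical guest-side sketch, assuming ring-0 guest code with physical memory identity-mapped; none of this code is part of the patch:

#include <stdint.h>

#define MSR_QPI_COMMBASE 0xfabe0010u   /* matches the define added to exec-all.h */

/* rdmsr wrapper; only legal at CPL 0 inside the guest. */
static inline uint64_t guest_rdmsr(uint32_t index)
{
    uint32_t lo, hi;
    __asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(index));
    return ((uint64_t)hi << 32) | lo;
}

/* Locate the QPI communication page, or return 0 when kqemu is not active
 * (helper_rdmsr() returns 0 in that case). */
static volatile uint32_t *qpi_comm_page(void)
{
    uint64_t base = guest_rdmsr(MSR_QPI_COMMBASE);

    if (!(base & 1))            /* qpi_init() sets the low bit */
        return 0;
    return (volatile uint32_t *)(uintptr_t)(base & ~(uint64_t)0xfff);
}

/* A 32-bit read of the page returns the virtual IF | IOPL bits
 * (qpi_mem_readl); a 32-bit write changes them (qpi_mem_writel).  The guest
 * must then leave the current translation block, e.g. with pushf/popf, as
 * the comment on qpi_mem_writel() notes. */
static uint32_t qpi_get_if_iopl(volatile uint32_t *page)
{
    return *page;
}

static void qpi_set_if_iopl(volatile uint32_t *page, uint32_t val)
{
    *page = val;
}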

kqemu.h: 118 changed lines

@@ -1,8 +1,8 @@
 /*
  * KQEMU header
  *
- * Copyright (c) 2004-2006 Fabrice Bellard
- *
+ *
+ * Copyright (c) 2004-2008 Fabrice Bellard
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
  * in the Software without restriction, including without limitation the rights
@@ -24,25 +24,27 @@
 #ifndef KQEMU_H
 #define KQEMU_H
 
-#define KQEMU_VERSION 0x010300
+#if defined(__i386__)
+#define KQEMU_PAD32(x) x
+#else
+#define KQEMU_PAD32(x)
+#endif
+
+#define KQEMU_VERSION 0x010400
 
 struct kqemu_segment_cache {
-    uint32_t selector;
-    unsigned long base;
-    uint32_t limit;
+    uint16_t selector;
+    uint16_t padding1;
+    uint32_t flags;
+    uint64_t base;
+    uint32_t limit;
+    uint32_t padding2;
 };
 
 struct kqemu_cpu_state {
-#ifdef __x86_64__
-    unsigned long regs[16];
-#else
-    unsigned long regs[8];
-#endif
-    unsigned long eip;
-    unsigned long eflags;
-
-    uint32_t dummy0, dummy1, dumm2, dummy3, dummy4;
+    uint64_t regs[16];
+    uint64_t eip;
+    uint64_t eflags;
 
     struct kqemu_segment_cache segs[6]; /* selector values */
     struct kqemu_segment_cache ldt;
@@ -50,63 +52,81 @@ struct kqemu_cpu_state {
     struct kqemu_segment_cache gdt; /* only base and limit are used */
     struct kqemu_segment_cache idt; /* only base and limit are used */
 
-    unsigned long cr0;
-    unsigned long dummy5;
-    unsigned long cr2;
-    unsigned long cr3;
-    unsigned long cr4;
-    uint32_t a20_mask;
+    uint64_t cr0;
+    uint64_t cr2;
+    uint64_t cr3;
+    uint64_t cr4;
+    uint64_t a20_mask;
 
     /* sysenter registers */
-    uint32_t sysenter_cs;
-    uint32_t sysenter_esp;
-    uint32_t sysenter_eip;
-    uint64_t efer __attribute__((aligned(8)));
+    uint64_t sysenter_cs;
+    uint64_t sysenter_esp;
+    uint64_t sysenter_eip;
+    uint64_t efer;
     uint64_t star;
-#ifdef __x86_64__
-    unsigned long lstar;
-    unsigned long cstar;
-    unsigned long fmask;
-    unsigned long kernelgsbase;
-#endif
-
+    uint64_t lstar;
+    uint64_t cstar;
+    uint64_t fmask;
+    uint64_t kernelgsbase;
+
     uint64_t tsc_offset;
 
-    unsigned long dr0;
-    unsigned long dr1;
-    unsigned long dr2;
-    unsigned long dr3;
-    unsigned long dr6;
-    unsigned long dr7;
+    uint64_t dr0;
+    uint64_t dr1;
+    uint64_t dr2;
+    uint64_t dr3;
+    uint64_t dr6;
+    uint64_t dr7;
 
     uint8_t cpl;
     uint8_t user_only;
+    uint16_t padding1;
 
     uint32_t error_code; /* error_code when exiting with an exception */
-    unsigned long next_eip; /* next eip value when exiting with an interrupt */
-    unsigned int nb_pages_to_flush; /* number of pages to flush,
+    uint64_t next_eip; /* next eip value when exiting with an interrupt */
+    uint32_t nb_pages_to_flush; /* number of pages to flush,
                                        KQEMU_FLUSH_ALL means full flush */
 #define KQEMU_MAX_PAGES_TO_FLUSH 512
 #define KQEMU_FLUSH_ALL (KQEMU_MAX_PAGES_TO_FLUSH + 1)
 
-    long retval;
+    int32_t retval;
 
     /* number of ram_dirty entries to update */
-    unsigned int nb_ram_pages_to_update;
+    uint32_t nb_ram_pages_to_update;
 #define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
 #define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
 
 #define KQEMU_MAX_MODIFIED_RAM_PAGES 512
-    unsigned int nb_modified_ram_pages;
+    uint32_t nb_modified_ram_pages;
 };
 
 struct kqemu_init {
     uint8_t *ram_base; /* must be page aligned */
-    unsigned long ram_size; /* must be multiple of 4 KB */
+    KQEMU_PAD32(uint32_t padding1;)
+    uint64_t ram_size; /* must be multiple of 4 KB */
     uint8_t *ram_dirty; /* must be page aligned */
-    uint32_t **phys_to_ram_map; /* must be page aligned */
-    unsigned long *pages_to_flush; /* must be page aligned */
-    unsigned long *ram_pages_to_update; /* must be page aligned */
-    unsigned long *modified_ram_pages; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding2;)
+    uint64_t *pages_to_flush; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding4;)
+    uint64_t *ram_pages_to_update; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding5;)
+    uint64_t *modified_ram_pages; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding6;)
 };
 
+#define KQEMU_IO_MEM_RAM 0
+#define KQEMU_IO_MEM_ROM 1
+#define KQEMU_IO_MEM_COMM 2 /* kqemu communication page */
+#define KQEMU_IO_MEM_UNASSIGNED 3 /* any device: return to application */
+
+struct kqemu_phys_mem {
+    uint64_t phys_addr; /* physical address range: phys_addr,
+                           phys_addr + size */
+    uint64_t size;
+    uint64_t ram_addr; /* corresponding ram address */
+    uint32_t io_index; /* memory type: see KQEMU_IO_MEM_xxx */
+    uint32_t padding1;
+};
+
 #define KQEMU_RET_ABORT (-1)
@@ -122,11 +142,13 @@ struct kqemu_init {
 #define KQEMU_INIT CTL_CODE(FILE_DEVICE_UNKNOWN, 2, METHOD_BUFFERED, FILE_WRITE_ACCESS)
 #define KQEMU_GET_VERSION CTL_CODE(FILE_DEVICE_UNKNOWN, 3, METHOD_BUFFERED, FILE_READ_ACCESS)
 #define KQEMU_MODIFY_RAM_PAGES CTL_CODE(FILE_DEVICE_UNKNOWN, 4, METHOD_BUFFERED, FILE_WRITE_ACCESS)
+#define KQEMU_SET_PHYS_MEM CTL_CODE(FILE_DEVICE_UNKNOWN, 5, METHOD_BUFFERED, FILE_WRITE_ACCESS)
 #else
 #define KQEMU_EXEC _IOWR('q', 1, struct kqemu_cpu_state)
 #define KQEMU_INIT _IOW('q', 2, struct kqemu_init)
 #define KQEMU_GET_VERSION _IOR('q', 3, int)
 #define KQEMU_MODIFY_RAM_PAGES _IOW('q', 4, int)
+#define KQEMU_SET_PHYS_MEM _IOW('q', 5, struct kqemu_phys_mem)
 #endif
 
 #endif /* KQEMU_H */
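
Tying the header back to kqemu.c: kqemu_init() opens KQEMU_DEVICE, declares a version variable alongside the new struct kqemu_init kinit, zeroes kinit so the explicit padding fields are deterministic, and issues KQEMU_INIT; the header now also defines KQEMU_VERSION 0x010400 next to KQEMU_GET_VERSION. A stripped-down, hypothetical sketch of how a caller of this ioctl ABI might use those pieces on a Unix host (not code from the patch; the real caller fills in the RAM fields before KQEMU_INIT):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "kqemu.h"      /* KQEMU_VERSION, KQEMU_GET_VERSION, KQEMU_INIT */

/* Hypothetical helper: open /dev/kqemu, refuse a module that does not speak
 * the 0x010400 ABI, then send a kqemu_init whose paddings are zeroed, as the
 * memset() added to kqemu_init() in kqemu.c does. */
static int kqemu_open_and_check(void)
{
    struct kqemu_init kinit;
    int fd, version = 0;

    fd = open("/dev/kqemu", O_RDWR);
    if (fd < 0)
        return -1;
    if (ioctl(fd, KQEMU_GET_VERSION, &version) < 0 ||
        version != KQEMU_VERSION) {
        fprintf(stderr, "kqemu: module speaks %#x, expected %#x\n",
                version, KQEMU_VERSION);
        close(fd);
        return -1;
    }
    memset(&kinit, 0, sizeof(kinit));   /* zero the explicit padding fields */
    if (ioctl(fd, KQEMU_INIT, &kinit) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}

int main(void)
{
    int fd = kqemu_open_and_check();

    printf("kqemu %savailable\n", fd < 0 ? "not " : "");
    if (fd >= 0)
        close(fd);
    return 0;
}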

@@ -1950,20 +1950,21 @@ void helper_cpuid(void)
     case 0x80000008:
         /* virtual & phys address size in low 2 bytes. */
         /* XXX: This value must match the one used in the MMU code. */
-#if defined(TARGET_X86_64)
-# if defined(USE_KQEMU)
-        EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
-# else
-        /* XXX: The physical address space is limited to 42 bits in exec.c. */
-        EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
-# endif
+        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
+            /* 64 bit processor */
+#if defined(USE_KQEMU)
+            EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
 #else
-# if defined(USE_KQEMU)
-        EAX = 0x00000020; /* 32 bits physical */
-# else
-        EAX = 0x00000024; /* 36 bits physical */
-# endif
+            /* XXX: The physical address space is limited to 42 bits in exec.c. */
+            EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
 #endif
+        } else {
+#if defined(USE_KQEMU)
+            EAX = 0x00000020; /* 32 bits physical */
+#else
+            EAX = 0x00000024; /* 36 bits physical */
+#endif
+        }
         EBX = 0;
         ECX = 0;
         EDX = 0;
@@ -3157,6 +3158,15 @@ void helper_rdmsr(void)
     case MSR_KERNELGSBASE:
         val = env->kernelgsbase;
         break;
 #endif
+#ifdef USE_KQEMU
+    case MSR_QPI_COMMBASE:
+        if (env->kqemu_enabled) {
+            val = kqemu_comm_base;
+        } else {
+            val = 0;
+        }
+        break;
+#endif
     default:
         /* XXX: exception ? */