Unbreak large mem support by removing kqemu

kqemu imposes a number of restrictions on the i386 target.  The worst is that
it prevents large guest memory configurations from working in the default build.
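
For reference, the restriction comes from the CONFIG_KQEMU paths removed below:
ram_addr_t is forced to uint32_t ("FIXME: This is wrong") and
TARGET_PHYS_ADDR_SPACE_BITS is pinned to 32, so guest RAM is capped at 4 GiB no
matter what the host can address.  A rough standalone illustration of the limits
involved (not part of this patch; values taken from the exec.c hunk below):

    /* Illustrative sketch only: guest RAM ceilings implied by
     * TARGET_PHYS_ADDR_SPACE_BITS with and without kqemu. */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t kqemu_limit  = 1ULL << 32; /* 32-bit ram_addr_t / phys space */
        uint64_t i386_limit   = 1ULL << 36; /* TARGET_PHYS_ADDR_SPACE_BITS 36 */
        uint64_t x86_64_limit = 1ULL << 42; /* TARGET_PHYS_ADDR_SPACE_BITS 42 */

        printf("kqemu build       : %" PRIu64 " GiB\n", kqemu_limit >> 30);  /* 4    */
        printf("i386 after patch  : %" PRIu64 " GiB\n", i386_limit >> 30);   /* 64   */
        printf("x86_64 after patch: %" PRIu64 " GiB\n", x86_64_limit >> 30); /* 4096 */
        return 0;
    }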

Furthermore, kqemu is fundamentally flawed in a number of ways.  It relies on
the TSC as a time source, which is not reliable from userspace on a
multiprocessor system.  Since most modern processors are multicore, this
severely limits the utility of kqemu.
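
For reference, the TSC dependence is visible in the cpu_get_time_fast() helper
and the last_io_time bookkeeping removed below.  A minimal sketch (not part of
this patch) of that style of raw TSC read:

    /* Sketch only: a raw per-core TSC read in the style of the
     * cpu_get_time_fast() helper this patch removes.  Each core has its own
     * counter and nothing here pins the thread or compensates for
     * unsynchronized TSCs, so successive reads from a migrating userspace
     * thread can jump or even appear to go backwards. */
    #include <stdint.h>

    static inline uint64_t read_tsc(void)
    {
        uint32_t lo, hi;
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
    }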

kvm is a viable alternative for people looking to accelerate qemu and has the
benefit of being supported by the upstream Linux kernel.  If someone can
implement workarounds that remove the restrictions introduced by kqemu, I'm
happy to hold off on and/or revert this patch.

N.B. kqemu will still function in the 0.11 series, but this patch removes it from
the 0.12 series.

Paul, please Ack or Nack this patch.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Anthony Liguori 2009-08-10 17:07:24 -05:00
parent 0953a80f04
commit 4a1418e07b
24 changed files with 8 additions and 1722 deletions


@ -70,7 +70,6 @@ Generic Subsystems:
Dynamic translator Fabrice Bellard
Main loop Fabrice Bellard (new maintainer needed)
TCG Fabrice Bellard
kqemu interface Fabrice Bellard
IDE device ?
SCSI device Paul Brook
PCI layer ?


@ -36,7 +36,6 @@ all: $(PROGS)
#########################################################
# cpu emulator library
libobj-y = exec.o translate-all.o cpu-exec.o translate.o
libobj-$(CONFIG_KQEMU) += kqemu.o
libobj-y += tcg/tcg.o tcg/tcg-runtime.o
libobj-$(CONFIG_SOFTFLOAT) += fpu/softfloat.o
libobj-$(CONFIG_NOSOFTFLOAT) += fpu/softfloat-native.o

configure

@ -185,7 +185,6 @@ vnc_sasl="yes"
bsd="no"
linux="no"
solaris="no"
kqemu="no"
profiler="no"
cocoa="no"
softmmu="yes"
@ -238,25 +237,16 @@ MINGW32*)
GNU/kFreeBSD)
audio_drv_list="oss"
audio_possible_drivers="oss sdl esd pa"
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
kqemu="yes"
fi
;;
FreeBSD)
bsd="yes"
audio_drv_list="oss"
audio_possible_drivers="oss sdl esd pa"
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
kqemu="yes"
fi
;;
DragonFly)
bsd="yes"
audio_drv_list="oss"
audio_possible_drivers="oss sdl esd pa"
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
kqemu="yes"
fi
aio="no"
;;
NetBSD)
@ -320,9 +310,6 @@ SunOS)
exit 1
fi
fi
if test "$solarisrev" -ge 9 ; then
kqemu="yes"
fi
fi
if test -f /usr/include/sys/soundcard.h ; then
audio_drv_list="oss"
@ -343,7 +330,6 @@ AIX)
usb="linux"
kvm="yes"
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
kqemu="yes"
audio_possible_drivers="$audio_possible_drivers fmod"
fi
;;
@ -358,9 +344,6 @@ if [ "$bsd" = "yes" ] ; then
fi
if test "$mingw32" = "yes" ; then
if [ "$cpu" = "i386" ] ; then
kqemu="yes"
fi
EXESUF=".exe"
QEMU_CFLAGS="-DWIN32_LEAN_AND_MEAN -DWINVER=0x501 $QEMU_CFLAGS"
LIBS="-lwinmm -lws2_32 -liphlpapi $LIBS"
@ -450,8 +433,6 @@ for opt do
;;
--disable-vde) vde="no"
;;
--disable-kqemu) kqemu="no"
;;
--disable-xen) xen="no"
;;
--disable-brlapi) brlapi="no"
@ -597,9 +578,6 @@ echo " --interp-prefix=PREFIX where to find shared libraries, etc."
echo " use %M for cpu name [$interp_prefix]"
echo " --target-list=LIST set target list [$target_list]"
echo ""
echo "kqemu kernel acceleration support:"
echo " --disable-kqemu disable kqemu support"
echo ""
echo "Advanced options (experts only):"
echo " --source-path=PATH path of source code [$source_path]"
echo " --cross-prefix=PREFIX use PREFIX for compile tools [$cross_prefix]"
@ -1534,7 +1512,6 @@ fi
if test -n "$sparc_cpu"; then
echo "Target Sparc Arch $sparc_cpu"
fi
echo "kqemu support $kqemu"
echo "xen support $xen"
echo "brlapi support $brlapi"
echo "Documentation $build_docs"
@ -2019,10 +1996,6 @@ case "$target_arch2" in
if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
echo "CONFIG_XEN=y" >> $config_mak
fi
if test $kqemu = "yes" -a "$target_softmmu" = "yes"
then
echo "CONFIG_KQEMU=y" >> $config_mak
fi
esac
case "$target_arch2" in
i386|x86_64|ppcemb|ppc|ppc64)


@ -873,7 +873,6 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
#define KQEMU_DIRTY_FLAG 0x04
#define MIGRATION_DIRTY_FLAG 0x08
/* read dirty bit (return 0 or 1) */
@ -1056,14 +1055,9 @@ static inline int64_t profile_getclock(void)
return cpu_get_real_ticks();
}
extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,


@ -10,12 +10,7 @@
#include "bswap.h"
/* address in the RAM (different from a physical address) */
#ifdef CONFIG_KQEMU
/* FIXME: This is wrong. */
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif
/* memory API */


@ -321,31 +321,6 @@ int cpu_exec(CPUState *env1)
}
env->exception_index = -1;
}
#ifdef CONFIG_KQEMU
if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
int ret;
env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
ret = kqemu_cpu_exec(env);
/* put eflags in CPU temporary format */
CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
DF = 1 - (2 * ((env->eflags >> 10) & 1));
CC_OP = CC_OP_EFLAGS;
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
if (ret == 1) {
/* exception */
longjmp(env->jmp_env, 1);
} else if (ret == 2) {
/* softmmu execution needed */
} else {
if (env->interrupt_request != 0 || env->exit_request != 0) {
/* hardware interrupt will be executed just after */
} else {
/* otherwise, we restart */
longjmp(env->jmp_env, 1);
}
}
}
#endif
if (kvm_enabled()) {
kvm_cpu_exec(env);
@ -620,11 +595,7 @@ int cpu_exec(CPUState *env1)
spans two pages, we cannot safely do a direct
jump. */
{
if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
(env->kqemu_enabled != 2) &&
#endif
tb->page_addr[1] == -1) {
if (next_tb != 0 && tb->page_addr[1] == -1) {
tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
}
}
@ -678,13 +649,6 @@ int cpu_exec(CPUState *env1)
}
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
if (kqemu_is_ok(env) &&
(cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
cpu_loop_exit();
}
#endif
} /* for(;;) */
} else {
env_to_regs();


@ -348,41 +348,6 @@ static inline int can_do_io(CPUState *env)
}
#endif
#ifdef CONFIG_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
#define MSR_QPI_COMMBASE 0xfabe0010
int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
ram_addr_t phys_offset);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);
extern uint32_t kqemu_comm_base;
extern ram_addr_t kqemu_phys_ram_size;
extern uint8_t *kqemu_phys_ram_base;
static inline int kqemu_is_ok(CPUState *env)
{
return(env->kqemu_enabled &&
(env->cr[0] & CR0_PE_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
(env->eflags & IF_MASK) &&
!(env->eflags & VM_MASK) &&
(env->kqemu_enabled == 2 ||
((env->hflags & HF_CPL_MASK) == 3 &&
(env->eflags & IOPL_MASK) != IOPL_MASK)));
}
#endif
typedef void (CPUDebugExcpHandler)(CPUState *env);
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

exec.c

@ -70,12 +70,11 @@
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
@ -1763,11 +1762,6 @@ void tlb_flush(CPUState *env, int flush_global)
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
#ifdef CONFIG_KQEMU
if (env->kqemu_enabled) {
kqemu_flush(env, flush_global);
}
#endif
tlb_flush_count++;
}
@ -1801,12 +1795,6 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
tlb_flush_jmp_cache(env, addr);
#ifdef CONFIG_KQEMU
if (env->kqemu_enabled) {
kqemu_flush_page(env, addr);
}
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
@ -1854,18 +1842,6 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
if (length == 0)
return;
len = length >> TARGET_PAGE_BITS;
#ifdef CONFIG_KQEMU
/* XXX: should not depend on cpu context */
env = first_cpu;
if (env->kqemu_enabled) {
ram_addr_t addr;
addr = start;
for(i = 0; i < len; i++) {
kqemu_set_notdirty(env, addr);
addr += TARGET_PAGE_SIZE;
}
}
#endif
mask = ~dirty_flags;
p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
for(i = 0; i < len; i++)
@ -2322,13 +2298,6 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
ram_addr_t orig_size = size;
void *subpage;
#ifdef CONFIG_KQEMU
/* XXX: should not depend on cpu context */
env = first_cpu;
if (env->kqemu_enabled) {
kqemu_set_phys_mem(start_addr, size, phys_offset);
}
#endif
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
@ -2423,32 +2392,10 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
kvm_uncoalesce_mmio_region(addr, size);
}
#ifdef CONFIG_KQEMU
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
ram_addr_t addr;
if ((last_ram_offset + size) > kqemu_phys_ram_size) {
fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
(uint64_t)size, (uint64_t)kqemu_phys_ram_size);
abort();
}
addr = last_ram_offset;
last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
return addr;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
RAMBlock *new_block;
#ifdef CONFIG_KQEMU
if (kqemu_phys_ram_base) {
return kqemu_ram_alloc(size);
}
#endif
size = TARGET_PAGE_ALIGN(size);
new_block = qemu_malloc(sizeof(*new_block));
@ -2491,12 +2438,6 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
RAMBlock **prevp;
RAMBlock *block;
#ifdef CONFIG_KQEMU
if (kqemu_phys_ram_base) {
return kqemu_phys_ram_base + addr;
}
#endif
prev = NULL;
prevp = &ram_blocks;
block = ram_blocks;
@ -2529,12 +2470,6 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
RAMBlock *block;
uint8_t *host = ptr;
#ifdef CONFIG_KQEMU
if (kqemu_phys_ram_base) {
return host - kqemu_phys_ram_base;
}
#endif
prev = NULL;
prevp = &ram_blocks;
block = ram_blocks;
@ -2639,11 +2574,6 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
#endif
}
stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
kqemu_modify_page(cpu_single_env, ram_addr);
#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
@ -2664,11 +2594,6 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
#endif
}
stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
kqemu_modify_page(cpu_single_env, ram_addr);
#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
@ -2689,11 +2614,6 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
#endif
}
stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
kqemu_modify_page(cpu_single_env, ram_addr);
#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
@ -3044,13 +2964,6 @@ static void io_mem_init(void)
io_mem_watch = cpu_register_io_memory(watch_mem_read,
watch_mem_write, NULL);
#ifdef CONFIG_KQEMU
if (kqemu_phys_ram_base) {
/* alloc dirty bits array */
phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
}
#endif
}
#endif /* !defined(CONFIG_USER_ONLY) */

hw/pc.c

@ -122,17 +122,7 @@ static void ioportF0_write(void *opaque, uint32_t addr, uint32_t data)
/* TSC handling */
uint64_t cpu_get_tsc(CPUX86State *env)
{
/* Note: when using kqemu, it is more logical to return the host TSC
because kqemu does not trap the RDTSC instruction for
performance reasons */
#ifdef CONFIG_KQEMU
if (env->kqemu_enabled) {
return cpu_get_real_ticks();
} else
#endif
{
return cpu_get_ticks();
}
return cpu_get_ticks();
}
/* SMM support */


@ -196,30 +196,18 @@ void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val)
{
LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);
ioport_write(0, addr, val);
#ifdef CONFIG_KQEMU
if (env)
env->last_io_time = cpu_get_time_fast();
#endif
}
void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val)
{
LOG_IOPORT("outw: %04"FMT_pioaddr" %04"PRIx16"\n", addr, val);
ioport_write(1, addr, val);
#ifdef CONFIG_KQEMU
if (env)
env->last_io_time = cpu_get_time_fast();
#endif
}
void cpu_outl(CPUState *env, pio_addr_t addr, uint32_t val)
{
LOG_IOPORT("outl: %04"FMT_pioaddr" %08"PRIx32"\n", addr, val);
ioport_write(2, addr, val);
#ifdef CONFIG_KQEMU
if (env)
env->last_io_time = cpu_get_time_fast();
#endif
}
uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
@ -227,10 +215,6 @@ uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
uint8_t val;
val = ioport_read(0, addr);
LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);
#ifdef CONFIG_KQEMU
if (env)
env->last_io_time = cpu_get_time_fast();
#endif
return val;
}
@ -239,10 +223,6 @@ uint16_t cpu_inw(CPUState *env, pio_addr_t addr)
uint16_t val;
val = ioport_read(1, addr);
LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val);
#ifdef CONFIG_KQEMU
if (env)
env->last_io_time = cpu_get_time_fast();
#endif
return val;
}
@ -251,9 +231,5 @@ uint32_t cpu_inl(CPUState *env, pio_addr_t addr)
uint32_t val;
val = ioport_read(2, addr);
LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val);
#ifdef CONFIG_KQEMU
if (env)
env->last_io_time = cpu_get_time_fast();
#endif
return val;
}

kqemu.c

@ -1,998 +0,0 @@
/*
* KQEMU support
*
* Copyright (c) 2005-2008 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef CONFIG_SOLARIS
#include <sys/ioccom.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#ifdef CONFIG_KQEMU
#define DEBUG
//#define PROFILE
#ifdef DEBUG
# define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
# define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
# define LOG_INT(...) do { } while (0)
# define LOG_INT_STATE(env) do { } while (0)
#endif
#include <unistd.h>
#include <fcntl.h>
#include "kqemu.h"
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
static void qpi_init(void);
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
/* 0 = not allowed
1 = user kqemu
2 = kernel kqemu
*/
int kqemu_allowed = 0;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;
#define cpuid(index, eax, ebx, ecx, edx) \
asm volatile ("cpuid" \
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
: "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
return 1;
}
#else
static int is_cpuid_supported(void)
{
int v0, v1;
asm volatile ("pushf\n"
"popl %0\n"
"movl %0, %1\n"
"xorl $0x00200000, %0\n"
"pushl %0\n"
"popf\n"
"pushf\n"
"popl %0\n"
: "=a" (v0), "=d" (v1)
:
: "cc");
return (v0 != v1);
}
#endif
static void kqemu_update_cpuid(CPUState *env)
{
int critical_features_mask, features, ext_features, ext_features_mask;
uint32_t eax, ebx, ecx, edx;
/* the following features are kept identical on the host and
target cpus because they are important for user code. Strictly
speaking, only SSE really matters because the OS must support
it if the user code uses it. */
critical_features_mask =
CPUID_CMOV | CPUID_CX8 |
CPUID_FXSR | CPUID_MMX | CPUID_SSE |
CPUID_SSE2 | CPUID_SEP;
ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
if (!is_cpuid_supported()) {
features = 0;
ext_features = 0;
} else {
cpuid(1, eax, ebx, ecx, edx);
features = edx;
ext_features = ecx;
}
#ifdef __x86_64__
/* NOTE: on x86_64 CPUs, SYSENTER is not supported in
compatibility mode, so in order to have the best performances
it is better not to use it */
features &= ~CPUID_SEP;
#endif
env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
(features & critical_features_mask);
env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
(ext_features & ext_features_mask);
/* XXX: we could update more of the target CPUID state so that the
non accelerated code sees exactly the same CPU features as the
accelerated code */
}
int kqemu_init(CPUState *env)
{
struct kqemu_init kinit;
int ret, version;
#ifdef _WIN32
DWORD temp;
#endif
if (!kqemu_allowed)
return -1;
#ifdef _WIN32
kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
FILE_SHARE_READ | FILE_SHARE_WRITE,
NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
NULL);
if (kqemu_fd == KQEMU_INVALID_FD) {
fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
KQEMU_DEVICE, GetLastError());
return -1;
}
#else
kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
if (kqemu_fd == KQEMU_INVALID_FD) {
fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
KQEMU_DEVICE, strerror(errno));
return -1;
}
#endif
version = 0;
#ifdef _WIN32
DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
&version, sizeof(version), &temp, NULL);
#else
ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
if (version != KQEMU_VERSION) {
fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
version, KQEMU_VERSION);
goto fail;
}
pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
sizeof(uint64_t));
if (!pages_to_flush)
goto fail;
ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
sizeof(uint64_t));
if (!ram_pages_to_update)
goto fail;
modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
sizeof(uint64_t));
if (!modified_ram_pages)
goto fail;
modified_ram_pages_table =
qemu_mallocz(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
if (!modified_ram_pages_table)
goto fail;
memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
kinit.ram_base = kqemu_phys_ram_base;
kinit.ram_size = kqemu_phys_ram_size;
kinit.ram_dirty = phys_ram_dirty;
kinit.pages_to_flush = pages_to_flush;
kinit.ram_pages_to_update = ram_pages_to_update;
kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
if (ret < 0) {
fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
fail:
kqemu_closefd(kqemu_fd);
kqemu_fd = KQEMU_INVALID_FD;
return -1;
}
kqemu_update_cpuid(env);
env->kqemu_enabled = kqemu_allowed;
nb_pages_to_flush = 0;
nb_ram_pages_to_update = 0;
qpi_init();
return 0;
}
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
nb_pages_to_flush = KQEMU_FLUSH_ALL;
else
pages_to_flush[nb_pages_to_flush++] = addr;
}
void kqemu_flush(CPUState *env, int global)
{
LOG_INT("kqemu_flush:\n");
nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
(unsigned long)ram_addr);
/* we only track transitions to dirty state */
if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
return;
if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
else
ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
int i;
unsigned long page_index;
for(i = 0; i < nb_modified_ram_pages; i++) {
page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
modified_ram_pages_table[page_index] = 0;
}
nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
unsigned long page_index;
int ret;
#ifdef _WIN32
DWORD temp;
#endif
page_index = ram_addr >> TARGET_PAGE_BITS;
if (!modified_ram_pages_table[page_index]) {
#if 0
printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
modified_ram_pages_table[page_index] = 1;
modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
/* flush */
#ifdef _WIN32
ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
&nb_modified_ram_pages,
sizeof(nb_modified_ram_pages),
NULL, 0, &temp, NULL);
#else
ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
&nb_modified_ram_pages);
#endif
kqemu_reset_modified_ram_pages();
}
}
}
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
ram_addr_t phys_offset)
{
struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
uint64_t end;
int ret, io_index;
end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
start_addr &= TARGET_PAGE_MASK;
kphys_mem->phys_addr = start_addr;
kphys_mem->size = end - start_addr;
kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
io_index = phys_offset & ~TARGET_PAGE_MASK;
switch(io_index) {
case IO_MEM_RAM:
kphys_mem->io_index = KQEMU_IO_MEM_RAM;
break;
case IO_MEM_ROM:
kphys_mem->io_index = KQEMU_IO_MEM_ROM;
break;
default:
if (qpi_io_memory == io_index) {
kphys_mem->io_index = KQEMU_IO_MEM_COMM;
} else {
kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
}
break;
}
#ifdef _WIN32
{
DWORD temp;
ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
kphys_mem, sizeof(*kphys_mem),
NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
}
#else
ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
if (ret < 0) {
fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64 " size=0x%08lx phys_offset=0x%08lx\n",
ret, start_addr,
(unsigned long)size, (unsigned long)phys_offset);
}
}
struct fpstate {
uint16_t fpuc;
uint16_t dummy1;
uint16_t fpus;
uint16_t dummy2;
uint16_t fptag;
uint16_t dummy3;
uint32_t fpip;
uint32_t fpcs;
uint32_t fpoo;
uint32_t fpos;
uint8_t fpregs1[8 * 10];
};
struct fpxstate {
uint16_t fpuc;
uint16_t fpus;
uint16_t fptag;
uint16_t fop;
uint32_t fpuip;
uint16_t cs_sel;
uint16_t dummy0;
uint32_t fpudp;
uint16_t ds_sel;
uint16_t dummy1;
uint32_t mxcsr;
uint32_t mxcsr_mask;
uint8_t fpregs1[8 * 16];
uint8_t xmm_regs[16 * 16];
uint8_t dummy2[96];
};
static struct fpxstate fpx1 __attribute__((aligned(16)));
static void restore_native_fp_frstor(CPUState *env)
{
int fptag, i, j;
struct fpstate fp1, *fp = &fp1;
fp->fpuc = env->fpuc;
fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
fptag = 0;
for (i=7; i>=0; i--) {
fptag <<= 2;
if (env->fptags[i]) {
fptag |= 3;
} else {
/* the FPU automatically computes it */
}
}
fp->fptag = fptag;
j = env->fpstt;
for(i = 0;i < 8; i++) {
memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
j = (j + 1) & 7;
}
asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
int fptag, i, j;
uint16_t fpuc;
struct fpstate fp1, *fp = &fp1;
asm volatile ("fsave %0" : : "m" (*fp));
env->fpuc = fp->fpuc;
env->fpstt = (fp->fpus >> 11) & 7;
env->fpus = fp->fpus & ~0x3800;
fptag = fp->fptag;
for(i = 0;i < 8; i++) {
env->fptags[i] = ((fptag & 3) == 3);
fptag >>= 2;
}
j = env->fpstt;
for(i = 0;i < 8; i++) {
memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
j = (j + 1) & 7;
}
/* we must restore the default rounding state */
fpuc = 0x037f | (env->fpuc & (3 << 10));
asm volatile("fldcw %0" : : "m" (fpuc));
}
static void restore_native_fp_fxrstor(CPUState *env)
{
struct fpxstate *fp = &fpx1;
int i, j, fptag;
fp->fpuc = env->fpuc;
fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
fptag = 0;
for(i = 0; i < 8; i++)
fptag |= (env->fptags[i] << i);
fp->fptag = fptag ^ 0xff;
j = env->fpstt;
for(i = 0;i < 8; i++) {
memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
j = (j + 1) & 7;
}
if (env->cpuid_features & CPUID_SSE) {
fp->mxcsr = env->mxcsr;
/* XXX: check if DAZ is not available */
fp->mxcsr_mask = 0xffff;
memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
}
asm volatile ("fxrstor %0" : "=m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
struct fpxstate *fp = &fpx1;
int fptag, i, j;
uint16_t fpuc;
asm volatile ("fxsave %0" : : "m" (*fp));
env->fpuc = fp->fpuc;
env->fpstt = (fp->fpus >> 11) & 7;
env->fpus = fp->fpus & ~0x3800;
fptag = fp->fptag ^ 0xff;
for(i = 0;i < 8; i++) {
env->fptags[i] = (fptag >> i) & 1;
}
j = env->fpstt;
for(i = 0;i < 8; i++) {
memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
j = (j + 1) & 7;
}
if (env->cpuid_features & CPUID_SSE) {
env->mxcsr = fp->mxcsr;
memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
}
/* we must restore the default rounding state */
asm volatile ("fninit");
fpuc = 0x037f | (env->fpuc & (3 << 10));
asm volatile("fldcw %0" : : "m" (fpuc));
}
static int do_syscall(CPUState *env,
struct kqemu_cpu_state *kenv)
{
int selector;
selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
int code64;
env->regs[R_ECX] = kenv->next_eip;
env->regs[11] = env->eflags;
code64 = env->hflags & HF_CS64_MASK;
cpu_x86_set_cpl(env, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
env->eflags &= ~env->fmask;
if (code64)
env->eip = env->lstar;
else
env->eip = env->cstar;
} else
#endif
{
env->regs[R_ECX] = (uint32_t)kenv->next_eip;
cpu_x86_set_cpl(env, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
env->eip = (uint32_t)env->star;
}
return 2;
}
#ifdef CONFIG_PROFILER
#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)
typedef struct PCRecord {
unsigned long pc;
int64_t count;
struct PCRecord *next;
} PCRecord;
static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
unsigned long h;
PCRecord **pr, *r;
h = pc / PC_REC_SIZE;
h = h ^ (h >> PC_REC_HASH_BITS);
h &= (PC_REC_HASH_SIZE - 1);
pr = &pc_rec_hash[h];
for(;;) {
r = *pr;
if (r == NULL)
break;
if (r->pc == pc) {
r->count++;
return;
}
pr = &r->next;
}
r = malloc(sizeof(PCRecord));
r->count = 1;
r->pc = pc;
r->next = NULL;
*pr = r;
nb_pc_records++;
}
static int pc_rec_cmp(const void *p1, const void *p2)
{
PCRecord *r1 = *(PCRecord **)p1;
PCRecord *r2 = *(PCRecord **)p2;
if (r1->count < r2->count)
return 1;
else if (r1->count == r2->count)
return 0;
else
return -1;
}
static void kqemu_record_flush(void)
{
PCRecord *r, *r_next;
int h;
for(h = 0; h < PC_REC_HASH_SIZE; h++) {
for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
r_next = r->next;
free(r);
}
pc_rec_hash[h] = NULL;
}
nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
PCRecord **pr, *r;
int i, h;
FILE *f;
int64_t total, sum;
pr = malloc(sizeof(PCRecord *) * nb_pc_records);
i = 0;
total = 0;
for(h = 0; h < PC_REC_HASH_SIZE; h++) {
for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
pr[i++] = r;
total += r->count;
}
}
qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);
f = fopen("/tmp/kqemu.stats", "w");
if (!f) {
perror("/tmp/kqemu.stats");
exit(1);
}
fprintf(f, "total: %" PRId64 "\n", total);
sum = 0;
for(i = 0; i < nb_pc_records; i++) {
r = pr[i];
sum += r->count;
fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
r->pc,
r->count,
(double)r->count / (double)total * 100.0,
(double)sum / (double)total * 100.0);
}
fclose(f);
free(pr);
kqemu_record_flush();
}
#endif
static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
const SegmentCache *sc)
{
ksc->selector = sc->selector;
ksc->flags = sc->flags;
ksc->limit = sc->limit;
ksc->base = sc->base;
}
static inline void kqemu_save_seg(SegmentCache *sc,
const struct kqemu_segment_cache *ksc)
{
sc->selector = ksc->selector;
sc->flags = ksc->flags;
sc->limit = ksc->limit;
sc->base = ksc->base;
}
int kqemu_cpu_exec(CPUState *env)
{
struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
int ret, cpl, i;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
#ifdef _WIN32
DWORD temp;
#endif
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
LOG_INT("kqemu: cpu_exec: enter\n");
LOG_INT_STATE(env);
for(i = 0; i < CPU_NB_REGS; i++)
kenv->regs[i] = env->regs[i];
kenv->eip = env->eip;
kenv->eflags = env->eflags;
for(i = 0; i < 6; i++)
kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
kqemu_load_seg(&kenv->ldt, &env->ldt);
kqemu_load_seg(&kenv->tr, &env->tr);
kqemu_load_seg(&kenv->gdt, &env->gdt);
kqemu_load_seg(&kenv->idt, &env->idt);
kenv->cr0 = env->cr[0];
kenv->cr2 = env->cr[2];
kenv->cr3 = env->cr[3];
kenv->cr4 = env->cr[4];
kenv->a20_mask = env->a20_mask;
kenv->efer = env->efer;
kenv->tsc_offset = 0;
kenv->star = env->star;
kenv->sysenter_cs = env->sysenter_cs;
kenv->sysenter_esp = env->sysenter_esp;
kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
kenv->lstar = env->lstar;
kenv->cstar = env->cstar;
kenv->fmask = env->fmask;
kenv->kernelgsbase = env->kernelgsbase;
#endif
if (env->dr[7] & 0xff) {
kenv->dr7 = env->dr[7];
kenv->dr0 = env->dr[0];
kenv->dr1 = env->dr[1];
kenv->dr2 = env->dr[2];
kenv->dr3 = env->dr[3];
} else {
kenv->dr7 = 0;
}
kenv->dr6 = env->dr[6];
cpl = (env->hflags & HF_CPL_MASK);
kenv->cpl = cpl;
kenv->nb_pages_to_flush = nb_pages_to_flush;
kenv->user_only = (env->kqemu_enabled == 1);
kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
nb_ram_pages_to_update = 0;
kenv->nb_modified_ram_pages = nb_modified_ram_pages;
kqemu_reset_modified_ram_pages();
if (env->cpuid_features & CPUID_FXSR)
restore_native_fp_fxrstor(env);
else
restore_native_fp_frstor(env);
#ifdef _WIN32
if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
kenv, sizeof(struct kqemu_cpu_state),
kenv, sizeof(struct kqemu_cpu_state),
&temp, NULL)) {
ret = kenv->retval;
} else {
ret = -1;
}
#else
ioctl(kqemu_fd, KQEMU_EXEC, kenv);
ret = kenv->retval;
#endif
if (env->cpuid_features & CPUID_FXSR)
save_native_fp_fxsave(env);
else
save_native_fp_fsave(env);
for(i = 0; i < CPU_NB_REGS; i++)
env->regs[i] = kenv->regs[i];
env->eip = kenv->eip;
env->eflags = kenv->eflags;
for(i = 0; i < 6; i++)
kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
cpu_x86_set_cpl(env, kenv->cpl);
kqemu_save_seg(&env->ldt, &kenv->ldt);
env->cr[0] = kenv->cr0;
env->cr[4] = kenv->cr4;
env->cr[3] = kenv->cr3;
env->cr[2] = kenv->cr2;
env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
env->kernelgsbase = kenv->kernelgsbase;
#endif
/* flush pages as indicated by kqemu */
if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
tlb_flush(env, 1);
} else {
for(i = 0; i < kenv->nb_pages_to_flush; i++) {
tlb_flush_page(env, pages_to_flush[i]);
}
}
nb_pages_to_flush = 0;
#ifdef CONFIG_PROFILER
kqemu_time += profile_getclock() - ti;
kqemu_exec_count++;
#endif
if (kenv->nb_ram_pages_to_update > 0) {
cpu_tlb_update_dirty(env);
}
if (kenv->nb_modified_ram_pages > 0) {
for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
unsigned long addr;
addr = modified_ram_pages[i];
tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
}
}
/* restore the hidden flags */
{
unsigned int new_hflags;
#ifdef TARGET_X86_64
if ((env->hflags & HF_LMA_MASK) &&
(env->segs[R_CS].flags & DESC_L_MASK)) {
/* long mode */
new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
} else
#endif
{
/* legacy / compatibility case */
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
if (!(env->cr[0] & CR0_PE_MASK) ||
(env->eflags & VM_MASK) ||
!(env->hflags & HF_CS32_MASK)) {
/* XXX: try to avoid this test. The problem comes from the
fact that is real mode or vm86 mode we only modify the
'base' and 'selector' fields of the segment cache to go
faster. A solution may be to force addseg to one in
translate-i386.c. */
new_hflags |= HF_ADDSEG_MASK;
} else {
new_hflags |= ((env->segs[R_DS].base |
env->segs[R_ES].base |
env->segs[R_SS].base) != 0) <<
HF_ADDSEG_SHIFT;
}
}
env->hflags = (env->hflags &
~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
new_hflags;
}
/* update FPU flags */
env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
if (env->cr[4] & CR4_OSFXSR_MASK)
env->hflags |= HF_OSFXSR_MASK;
else
env->hflags &= ~HF_OSFXSR_MASK;
LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
if (ret == KQEMU_RET_SYSCALL) {
/* syscall instruction */
return do_syscall(env, kenv);
} else
if ((ret & 0xff00) == KQEMU_RET_INT) {
env->exception_index = ret & 0xff;
env->error_code = 0;
env->exception_is_int = 1;
env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
kqemu_ret_int_count++;
#endif
LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
LOG_INT_STATE(env);
return 1;
} else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
env->exception_index = ret & 0xff;
env->error_code = kenv->error_code;
env->exception_is_int = 0;
env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
kqemu_ret_excp_count++;
#endif
LOG_INT("kqemu: exception v=%02x e=%04x:\n",
env->exception_index, env->error_code);
LOG_INT_STATE(env);
return 1;
} else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
kqemu_ret_intr_count++;
#endif
LOG_INT_STATE(env);
return 0;
} else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
{
unsigned long pc = env->eip + env->segs[R_CS].base;
kqemu_record_pc(pc);
}
#endif
LOG_INT_STATE(env);
return 2;
} else {
cpu_dump_state(env, stderr, fprintf, 0);
fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
exit(1);
}
return 0;
}
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32)
/* cancelling the I/O request causes KQEMU to finish executing the
current block and successfully returning. */
CancelIo(kqemu_fd);
#endif
}
/*
QEMU paravirtualization interface. The current interface only
allows to modify the IF and IOPL flags when running in
kqemu.
At this point it is not very satisfactory. I leave it for reference
as it adds little complexity.
*/
#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr)
{
return 0;
}
static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
{
return 0;
}
static void qpi_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
static void qpi_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
static uint32_t qpi_mem_readl(void *opaque, target_phys_addr_t addr)
{
CPUState *env;
env = cpu_single_env;
if (!env)
return 0;
return env->eflags & (IF_MASK | IOPL_MASK);
}
/* Note: after writing to this address, the guest code must make sure
it is exiting the current TB. pushf/popf can be used for that
purpose. */
static void qpi_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
CPUState *env;
env = cpu_single_env;
if (!env)
return;
env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
(val & (IF_MASK | IOPL_MASK));
}
static CPUReadMemoryFunc *qpi_mem_read[3] = {
qpi_mem_readb,
qpi_mem_readw,
qpi_mem_readl,
};
static CPUWriteMemoryFunc *qpi_mem_write[3] = {
qpi_mem_writeb,
qpi_mem_writew,
qpi_mem_writel,
};
static void qpi_init(void)
{
kqemu_comm_base = 0xff000000 | 1;
qpi_io_memory = cpu_register_io_memory(
qpi_mem_read,
qpi_mem_write, NULL);
cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
0x1000, qpi_io_memory);
}
#endif

kqemu.h

@ -1,154 +0,0 @@
/*
* KQEMU header
*
* Copyright (c) 2004-2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef KQEMU_H
#define KQEMU_H
#if defined(__i386__)
#define KQEMU_PAD32(x) x
#else
#define KQEMU_PAD32(x)
#endif
#define KQEMU_VERSION 0x010400
struct kqemu_segment_cache {
uint16_t selector;
uint16_t padding1;
uint32_t flags;
uint64_t base;
uint32_t limit;
uint32_t padding2;
};
struct kqemu_cpu_state {
uint64_t regs[16];
uint64_t eip;
uint64_t eflags;
struct kqemu_segment_cache segs[6]; /* selector values */
struct kqemu_segment_cache ldt;
struct kqemu_segment_cache tr;
struct kqemu_segment_cache gdt; /* only base and limit are used */
struct kqemu_segment_cache idt; /* only base and limit are used */
uint64_t cr0;
uint64_t cr2;
uint64_t cr3;
uint64_t cr4;
uint64_t a20_mask;
/* sysenter registers */
uint64_t sysenter_cs;
uint64_t sysenter_esp;
uint64_t sysenter_eip;
uint64_t efer;
uint64_t star;
uint64_t lstar;
uint64_t cstar;
uint64_t fmask;
uint64_t kernelgsbase;
uint64_t tsc_offset;
uint64_t dr0;
uint64_t dr1;
uint64_t dr2;
uint64_t dr3;
uint64_t dr6;
uint64_t dr7;
uint8_t cpl;
uint8_t user_only;
uint16_t padding1;
uint32_t error_code; /* error_code when exiting with an exception */
uint64_t next_eip; /* next eip value when exiting with an interrupt */
uint32_t nb_pages_to_flush; /* number of pages to flush,
KQEMU_FLUSH_ALL means full flush */
#define KQEMU_MAX_PAGES_TO_FLUSH 512
#define KQEMU_FLUSH_ALL (KQEMU_MAX_PAGES_TO_FLUSH + 1)
int32_t retval;
/* number of ram_dirty entries to update */
uint32_t nb_ram_pages_to_update;
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
uint32_t nb_modified_ram_pages;
};
struct kqemu_init {
uint8_t *ram_base; /* must be page aligned */
KQEMU_PAD32(uint32_t padding1;)
uint64_t ram_size; /* must be multiple of 4 KB */
uint8_t *ram_dirty; /* must be page aligned */
KQEMU_PAD32(uint32_t padding2;)
uint64_t *pages_to_flush; /* must be page aligned */
KQEMU_PAD32(uint32_t padding4;)
uint64_t *ram_pages_to_update; /* must be page aligned */
KQEMU_PAD32(uint32_t padding5;)
uint64_t *modified_ram_pages; /* must be page aligned */
KQEMU_PAD32(uint32_t padding6;)
};
#define KQEMU_IO_MEM_RAM 0
#define KQEMU_IO_MEM_ROM 1
#define KQEMU_IO_MEM_COMM 2 /* kqemu communication page */
#define KQEMU_IO_MEM_UNASSIGNED 3 /* any device: return to application */
struct kqemu_phys_mem {
uint64_t phys_addr; /* physical address range: phys_addr,
phys_addr + size */
uint64_t size;
uint64_t ram_addr; /* corresponding ram address */
uint32_t io_index; /* memory type: see KQEMU_IO_MEM_xxx */
uint32_t padding1;
};
#define KQEMU_RET_ABORT (-1)
#define KQEMU_RET_EXCEPTION 0x0000 /* 8 low order bit are the exception */
#define KQEMU_RET_INT 0x0100 /* 8 low order bit are the interrupt */
#define KQEMU_RET_SOFTMMU 0x0200 /* emulation needed (I/O or
unsupported INSN) */
#define KQEMU_RET_INTR 0x0201 /* interrupted by a signal */
#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
#ifdef _WIN32
#define KQEMU_EXEC CTL_CODE(FILE_DEVICE_UNKNOWN, 1, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS)
#define KQEMU_INIT CTL_CODE(FILE_DEVICE_UNKNOWN, 2, METHOD_BUFFERED, FILE_WRITE_ACCESS)
#define KQEMU_GET_VERSION CTL_CODE(FILE_DEVICE_UNKNOWN, 3, METHOD_BUFFERED, FILE_READ_ACCESS)
#define KQEMU_MODIFY_RAM_PAGES CTL_CODE(FILE_DEVICE_UNKNOWN, 4, METHOD_BUFFERED, FILE_WRITE_ACCESS)
#define KQEMU_SET_PHYS_MEM CTL_CODE(FILE_DEVICE_UNKNOWN, 5, METHOD_BUFFERED, FILE_WRITE_ACCESS)
#else
#define KQEMU_EXEC _IOWR('q', 1, struct kqemu_cpu_state)
#define KQEMU_INIT _IOW('q', 2, struct kqemu_init)
#define KQEMU_GET_VERSION _IOR('q', 3, int)
#define KQEMU_MODIFY_RAM_PAGES _IOW('q', 4, int)
#define KQEMU_SET_PHYS_MEM _IOW('q', 5, struct kqemu_phys_mem)
#endif
#endif /* KQEMU_H */


@ -1390,36 +1390,6 @@ static void tlb_info(Monitor *mon)
#endif
static void do_info_kqemu(Monitor *mon)
{
#ifdef CONFIG_KQEMU
CPUState *env;
int val;
val = 0;
env = mon_get_cpu();
if (!env) {
monitor_printf(mon, "No cpu initialized yet");
return;
}
val = env->kqemu_enabled;
monitor_printf(mon, "kqemu support: ");
switch(val) {
default:
case 0:
monitor_printf(mon, "disabled\n");
break;
case 1:
monitor_printf(mon, "enabled for user code\n");
break;
case 2:
monitor_printf(mon, "enabled for user and kernel code\n");
break;
}
#else
monitor_printf(mon, "kqemu support: not compiled\n");
#endif
}
static void do_info_kvm(Monitor *mon)
{
#ifdef CONFIG_KVM
@ -1454,14 +1424,6 @@ static void do_info_numa(Monitor *mon)
#ifdef CONFIG_PROFILER
int64_t kqemu_time;
int64_t qemu_time;
int64_t kqemu_exec_count;
int64_t dev_time;
int64_t kqemu_ret_int_count;
int64_t kqemu_ret_excp_count;
int64_t kqemu_ret_intr_count;
static void do_info_profile(Monitor *mon)
{
int64_t total;
@ -1472,25 +1434,8 @@ static void do_info_profile(Monitor *mon)
dev_time, dev_time / (double)ticks_per_sec);
monitor_printf(mon, "qemu time %" PRId64 " (%0.3f)\n",
qemu_time, qemu_time / (double)ticks_per_sec);
monitor_printf(mon, "kqemu time %" PRId64 " (%0.3f %0.1f%%) count=%"
PRId64 " int=%" PRId64 " excp=%" PRId64 " intr=%"
PRId64 "\n",
kqemu_time, kqemu_time / (double)ticks_per_sec,
kqemu_time / (double)total * 100.0,
kqemu_exec_count,
kqemu_ret_int_count,
kqemu_ret_excp_count,
kqemu_ret_intr_count);
qemu_time = 0;
kqemu_time = 0;
kqemu_exec_count = 0;
dev_time = 0;
kqemu_ret_int_count = 0;
kqemu_ret_excp_count = 0;
kqemu_ret_intr_count = 0;
#ifdef CONFIG_KQEMU
kqemu_record_dump();
#endif
}
#else
static void do_info_profile(Monitor *mon)
@ -1841,8 +1786,6 @@ static const mon_cmd_t info_cmds[] = {
#endif
{ "jit", "", do_info_jit,
"", "show dynamic compiler info", },
{ "kqemu", "", do_info_kqemu,
"", "show KQEMU information", },
{ "kvm", "", do_info_kvm,
"", "show KVM information", },
{ "numa", "", do_info_numa,

osdep.c

@ -88,119 +88,6 @@ void qemu_vfree(void *ptr)
#else
#if defined(CONFIG_KQEMU)
#ifdef __OpenBSD__
#include <sys/param.h>
#include <sys/types.h>
#include <sys/mount.h>
#else
#ifndef __FreeBSD__
#include <sys/vfs.h>
#endif
#endif
#include <sys/mman.h>
#include <fcntl.h>
static void *kqemu_vmalloc(size_t size)
{
static int phys_ram_fd = -1;
static int phys_ram_size = 0;
void *ptr;
/* no need (?) for a dummy file on OpenBSD/FreeBSD */
#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
int map_anon = MAP_ANON;
#else
int map_anon = 0;
const char *tmpdir;
char phys_ram_file[1024];
#ifdef CONFIG_SOLARIS
struct statvfs stfs;
#else
struct statfs stfs;
#endif
if (!size) {
abort ();
}
if (phys_ram_fd < 0) {
tmpdir = getenv("QEMU_TMPDIR");
if (!tmpdir)
#ifdef CONFIG_SOLARIS
tmpdir = "/tmp";
if (statvfs(tmpdir, &stfs) == 0) {
#else
tmpdir = "/dev/shm";
if (statfs(tmpdir, &stfs) == 0) {
#endif
int64_t free_space;
int ram_mb;
free_space = (int64_t)stfs.f_bavail * stfs.f_bsize;
if ((ram_size + 8192 * 1024) >= free_space) {
ram_mb = (ram_size / (1024 * 1024));
fprintf(stderr,
"You do not have enough space in '%s' for the %d MB of QEMU virtual RAM.\n",
tmpdir, ram_mb);
if (strcmp(tmpdir, "/dev/shm") == 0) {
fprintf(stderr, "To have more space available provided you have enough RAM and swap, do as root:\n"
"mount -o remount,size=%dm /dev/shm\n",
ram_mb + 16);
} else {
fprintf(stderr,
"Use the '-m' option of QEMU to diminish the amount of virtual RAM or use the\n"
"QEMU_TMPDIR environment variable to set another directory where the QEMU\n"
"temporary RAM file will be opened.\n");
}
fprintf(stderr, "Or disable the accelerator module with -no-kqemu\n");
exit(1);
}
}
snprintf(phys_ram_file, sizeof(phys_ram_file), "%s/qemuXXXXXX",
tmpdir);
phys_ram_fd = mkstemp(phys_ram_file);
if (phys_ram_fd < 0) {
fprintf(stderr,
"warning: could not create temporary file in '%s'.\n"
"Use QEMU_TMPDIR to select a directory in a tmpfs filesystem.\n"
"Using '/tmp' as fallback.\n",
tmpdir);
snprintf(phys_ram_file, sizeof(phys_ram_file), "%s/qemuXXXXXX",
"/tmp");
phys_ram_fd = mkstemp(phys_ram_file);
if (phys_ram_fd < 0) {
fprintf(stderr, "Could not create temporary memory file '%s'\n",
phys_ram_file);
exit(1);
}
}
unlink(phys_ram_file);
}
size = (size + 4095) & ~4095;
ftruncate(phys_ram_fd, phys_ram_size + size);
#endif /* !(__OpenBSD__ || __FreeBSD__ || __DragonFly__) */
ptr = mmap(NULL,
size,
PROT_WRITE | PROT_READ, map_anon | MAP_SHARED,
phys_ram_fd, phys_ram_size);
if (ptr == MAP_FAILED) {
fprintf(stderr, "Could not map physical memory\n");
exit(1);
}
phys_ram_size += size;
return ptr;
}
static void kqemu_vfree(void *ptr)
{
/* may be useful some day, but currently we do not need to free */
}
#endif
void *qemu_memalign(size_t alignment, size_t size)
{
#if defined(_POSIX_C_SOURCE)
@ -220,19 +107,11 @@ void *qemu_memalign(size_t alignment, size_t size)
/* alloc shared memory pages */
void *qemu_vmalloc(size_t size)
{
#if defined(CONFIG_KQEMU)
if (kqemu_allowed)
return kqemu_vmalloc(size);
#endif
return qemu_memalign(getpagesize(), size);
}
void qemu_vfree(void *ptr)
{
#if defined(CONFIG_KQEMU)
if (kqemu_allowed)
kqemu_vfree(ptr);
#endif
free(ptr);
}


@ -57,8 +57,6 @@ show virtual to physical memory mappings (i386 only)
show the active virtual memory mappings (i386 only)
@item info hpet
show state of HPET (i386 only)
@item info kqemu
show KQEMU information
@item info kvm
show KVM information
@item info usb


@ -1414,25 +1414,6 @@ STEXI
Set the filename for the BIOS.
ETEXI
#ifdef CONFIG_KQEMU
DEF("kernel-kqemu", 0, QEMU_OPTION_kernel_kqemu, \
"-kernel-kqemu enable KQEMU full virtualization (default is user mode only)\n")
#endif
STEXI
@item -kernel-kqemu
Enable KQEMU full virtualization (default is user mode only).
ETEXI
#ifdef CONFIG_KQEMU
DEF("enable-kqemu", 0, QEMU_OPTION_enable_kqemu, \
"-enable-kqemu enable KQEMU kernel module usage\n")
#endif
STEXI
@item -enable-kqemu
Enable KQEMU kernel module usage. KQEMU options are only available if
KQEMU support is enabled when compiling.
ETEXI
#ifdef CONFIG_KVM
DEF("enable-kvm", 0, QEMU_OPTION_enable_kvm, \
"-enable-kvm enable KVM full virtualization support\n")


@ -116,8 +116,8 @@ QEMU full system emulation features:
QEMU uses a full software MMU for maximum portability.
@item
QEMU can optionally use an in-kernel accelerator, like kqemu and
kvm. The accelerators execute some of the guest code natively, while
QEMU can optionally use an in-kernel accelerator, like kvm. The accelerators
execute some of the guest code natively, while
continuing to emulate the rest of the machine.
@item


@ -75,9 +75,6 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef CONFIG_KQEMU
env->last_io_time = cpu_get_time_fast();
#endif
return res;
}
@ -220,9 +217,6 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef CONFIG_KQEMU
env->last_io_time = cpu_get_time_fast();
#endif
}
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,


@ -131,10 +131,6 @@ extern int semihosting_enabled;
extern int old_param;
extern int boot_menu;
#ifdef CONFIG_KQEMU
extern int kqemu_allowed;
#endif
#define MAX_NODES 64
extern int nb_numa_nodes;
extern uint64_t node_mem[MAX_NODES];


@ -30,4 +30,3 @@ Optimizations/Features:
- evaluate x87 stack pointer statically
- find a way to avoid translating several time the same TB if CR0.TS
is set or not.
- move kqemu support outside target-i386.


@ -682,11 +682,6 @@ typedef struct CPUX86State {
uint64_t mask;
} mtrr_var[8];
#ifdef CONFIG_KQEMU
int kqemu_enabled;
int last_io_time;
#endif
/* For KVM */
uint64_t interrupt_bitmap[256 / 64];
uint32_t mp_state;
@ -850,15 +845,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
#ifdef CONFIG_KQEMU
static inline int cpu_get_time_fast(void)
{
int low, high;
asm volatile("rdtsc" : "=a" (low), "=d" (high));
return low;
}
#endif
#define TARGET_PAGE_BITS 12
#define cpu_init cpu_x86_init


@ -995,15 +995,11 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
/* XXX: This value should match the one returned by CPUID
* and in exec.c */
#if defined(CONFIG_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
/* return value:
-1 = cannot handle fault
@ -1743,21 +1739,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
/* XXX: This value must match the one used in the MMU code. */
if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
/* 64 bit processor */
#if defined(CONFIG_KQEMU)
*eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
*eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
} else {
#if defined(CONFIG_KQEMU)
*eax = 0x00000020; /* 32 bits physical */
#else
if (env->cpuid_features & CPUID_PSE36)
*eax = 0x00000024; /* 36 bits physical */
else
*eax = 0x00000020; /* 32 bits physical */
#endif
}
*ebx = 0;
*ecx = 0;
@ -1833,9 +1821,6 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
}
mce_init(env);
cpu_reset(env);
#ifdef CONFIG_KQEMU
kqemu_init(env);
#endif
qemu_init_vcpu(env);


@ -1111,14 +1111,6 @@ void helper_sysret(int dflag)
env->eflags |= IF_MASK;
cpu_x86_set_cpl(env, 3);
}
#ifdef CONFIG_KQEMU
if (kqemu_is_ok(env)) {
if (env->hflags & HF_LMA_MASK)
CC_OP = CC_OP_EFLAGS;
env->exception_index = -1;
cpu_loop_exit();
}
#endif
}
#endif
@ -2506,12 +2498,6 @@ void helper_lcall_protected(int new_cs, target_ulong new_eip,
SET_ESP(sp, sp_mask);
EIP = offset;
}
#ifdef CONFIG_KQEMU
if (kqemu_is_ok(env)) {
env->exception_index = -1;
cpu_loop_exit();
}
#endif
}
/* real and vm86 mode iret */
@ -2792,24 +2778,11 @@ void helper_iret_protected(int shift, int next_eip)
helper_ret_protected(shift, 1, 0);
}
env->hflags2 &= ~HF2_NMI_MASK;
#ifdef CONFIG_KQEMU
if (kqemu_is_ok(env)) {
CC_OP = CC_OP_EFLAGS;
env->exception_index = -1;
cpu_loop_exit();
}
#endif
}
void helper_lret_protected(int shift, int addend)
{
helper_ret_protected(shift, 0, addend);
#ifdef CONFIG_KQEMU
if (kqemu_is_ok(env)) {
env->exception_index = -1;
cpu_loop_exit();
}
#endif
}
void helper_sysenter(void)
@ -2882,12 +2855,6 @@ void helper_sysexit(int dflag)
}
ESP = ECX;
EIP = EDX;
#ifdef CONFIG_KQEMU
if (kqemu_is_ok(env)) {
env->exception_index = -1;
cpu_loop_exit();
}
#endif
}
#if defined(CONFIG_USER_ONLY)
@ -3210,15 +3177,6 @@ void helper_rdmsr(void)
case MSR_KERNELGSBASE:
val = env->kernelgsbase;
break;
#endif
#ifdef CONFIG_KQEMU
case MSR_QPI_COMMBASE:
if (env->kqemu_enabled) {
val = kqemu_comm_base;
} else {
val = 0;
}
break;
#endif
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):

vl.c

@ -1139,11 +1139,6 @@ static void host_alarm_handler(int host_signum)
if (next_cpu) {
/* stop the currently executing cpu because a timer occured */
cpu_exit(next_cpu);
#ifdef CONFIG_KQEMU
if (next_cpu->kqemu_enabled) {
kqemu_cpu_interrupt(next_cpu);
}
#endif
}
#endif
timer_alarm_pending = 1;
@ -3597,11 +3592,7 @@ void qemu_notify_event(void)
if (env) {
cpu_exit(env);
#ifdef USE_KQEMU
if (env->kqemu_enabled)
kqemu_cpu_interrupt(env);
#endif
}
}
}
#define qemu_mutex_lock_iothread() do { } while (0)
@ -5182,11 +5173,7 @@ int main(int argc, char **argv, char **envp)
}
/* On 32-bit hosts, QEMU is limited by virtual address space */
if (value > (2047 << 20)
#ifndef CONFIG_KQEMU
&& HOST_LONG_BITS == 32
#endif
) {
if (value > (2047 << 20) && HOST_LONG_BITS == 32) {
fprintf(stderr, "qemu: at most 2047 MB RAM can be simulated\n");
exit(1);
}
@ -5367,20 +5354,9 @@ int main(int argc, char **argv, char **envp)
}
break;
#endif
#ifdef CONFIG_KQEMU
case QEMU_OPTION_enable_kqemu:
kqemu_allowed = 1;
break;
case QEMU_OPTION_kernel_kqemu:
kqemu_allowed = 2;
break;
#endif
#ifdef CONFIG_KVM
case QEMU_OPTION_enable_kvm:
kvm_allowed = 1;
#ifdef CONFIG_KQEMU
kqemu_allowed = 0;
#endif
break;
#endif
case QEMU_OPTION_usb:
@ -5593,14 +5569,6 @@ int main(int argc, char **argv, char **envp)
data_dir = CONFIG_QEMU_SHAREDIR;
}
#if defined(CONFIG_KVM) && defined(CONFIG_KQEMU)
if (kvm_allowed && kqemu_allowed) {
fprintf(stderr,
"You can not enable both KVM and kqemu at the same time\n");
exit(1);
}
#endif
/*
* Default to max_cpus = smp_cpus, in case the user doesn't
* specify a max_cpus value.
@ -5679,10 +5647,6 @@ int main(int argc, char **argv, char **envp)
}
#endif
#ifdef CONFIG_KQEMU
if (smp_cpus > 1)
kqemu_allowed = 0;
#endif
if (qemu_init_main_loop()) {
fprintf(stderr, "qemu_init_main_loop failed\n");
exit(1);
@ -5748,19 +5712,6 @@ int main(int argc, char **argv, char **envp)
if (ram_size == 0)
ram_size = DEFAULT_RAM_SIZE * 1024 * 1024;
#ifdef CONFIG_KQEMU
/* FIXME: This is a nasty hack because kqemu can't cope with dynamic
guest ram allocation. It needs to go away. */
if (kqemu_allowed) {
kqemu_phys_ram_size = ram_size + 8 * 1024 * 1024 + 4 * 1024 * 1024;
kqemu_phys_ram_base = qemu_vmalloc(kqemu_phys_ram_size);
if (!kqemu_phys_ram_base) {
fprintf(stderr, "Could not allocate physical memory\n");
exit(1);
}
}
#endif
/* init the dynamic translator */
cpu_exec_init_all(tb_size * 1024 * 1024);