f65ed4c152
MMIO exits are more expensive in KVM or Xen than in QEMU because they involve, at least, privilege transitions. However, MMIO write operations can be effectively batched if those writes do not have side effects. Good examples of this include VGA pixel operations when in a planar mode. As it turns out, we can get a nice boost in other areas too. Laurent mentioned a 9.7% performance boost in iperf with the coalesced MMIO changes for the e1000 when he originally posted this work for KVM.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5961 c046a42c-6fe2-441c-8c8c-71466251a162
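As a quick illustration (not part of this commit), here is a minimal sketch of how a device model with side-effect-free MMIO, such as a planar-mode framebuffer, might use the hooks declared in the kvm.h header shown below. The region name, base address, and size are hypothetical placeholders; only kvm_enabled(), kvm_coalesce_mmio_region(), and kvm_uncoalesce_mmio_region() come from the header, and the surrounding QEMU build environment is assumed.

/* Hypothetical example: mark a framebuffer-style region as coalesced MMIO. */
#include "kvm.h"                /* declares the hooks used below */

#define DEMO_FB_BASE 0xa0000    /* hypothetical guest-physical base address */
#define DEMO_FB_SIZE 0x20000    /* hypothetical region size in bytes        */

void demo_fb_map(void)
{
    /* Framebuffer writes have no side effects, so ask KVM to batch them
     * rather than handling one exit per store. */
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(DEMO_FB_BASE, DEMO_FB_SIZE);
    }
}

void demo_fb_unmap(void)
{
    /* Drop the registration when the region goes away. */
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(DEMO_FB_BASE, DEMO_FB_SIZE);
    }
}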
79 lines
1.7 KiB
C
/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include "config.h"

#ifdef CONFIG_KVM
extern int kvm_allowed;

#define kvm_enabled() (kvm_allowed)
#else
#define kvm_enabled() (0)
#endif

struct kvm_run;

/* external API */

int kvm_init(int smp_cpus);

int kvm_init_vcpu(CPUState *env);

int kvm_cpu_exec(CPUState *env);

void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset);

void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);

int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len);
int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len);

int kvm_has_sync_mmu(void);

/* Register/unregister a guest-physical range whose writes have no side
 * effects (e.g. VGA pixel operations in a planar mode), so KVM can batch
 * them rather than handling each write as a separate exit. */
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);

/* internal API */

struct KVMState;
typedef struct KVMState KVMState;

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUState *env, int type, ...);

/* Arch specific hooks */

int kvm_arch_post_run(CPUState *env, struct kvm_run *run);

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run);

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run);

int kvm_arch_get_registers(CPUState *env);

int kvm_arch_put_registers(CPUState *env);

int kvm_arch_init(KVMState *s, int smp_cpus);

int kvm_arch_init_vcpu(CPUState *env);

#endif