2008-11-05 17:29:27 +01:00
|
|
|
/*
|
|
|
|
* QEMU KVM support
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2008
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Anthony Liguori <aliguori@us.ibm.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2023-04-03 14:53:17 +02:00
|
|
|
/* header to be included in non-KVM-specific code */
|
|
|
|
|
2008-11-05 17:29:27 +01:00
|
|
|
#ifndef QEMU_KVM_H
|
|
|
|
#define QEMU_KVM_H
|
|
|
|
|
2023-06-06 07:32:17 +02:00
|
|
|
#include "exec/memattrs.h"
|
2021-02-04 17:39:24 +01:00
|
|
|
#include "qemu/accel.h"
|
2020-09-03 22:43:22 +02:00
|
|
|
#include "qom/object.h"
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2017-06-26 07:22:54 +02:00
|
|
|
#ifdef NEED_CPU_H
|
|
|
|
# ifdef CONFIG_KVM
|
|
|
|
# include <linux/kvm.h>
|
|
|
|
# define CONFIG_KVM_IS_POSSIBLE
|
|
|
|
# endif
|
|
|
|
#else
|
|
|
|
# define CONFIG_KVM_IS_POSSIBLE
|
2010-03-17 12:07:54 +01:00
|
|
|
#endif
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2017-06-26 07:22:54 +02:00
|
|
|
#ifdef CONFIG_KVM_IS_POSSIBLE
|
|
|
|
|
2013-01-24 06:03:27 +01:00
|
|
|
extern bool kvm_allowed;
|
2012-01-31 19:17:52 +01:00
|
|
|
extern bool kvm_kernel_irqchip;
|
2015-11-16 19:03:06 +01:00
|
|
|
extern bool kvm_split_irqchip;
|
2012-07-26 16:35:11 +02:00
|
|
|
extern bool kvm_async_interrupts_allowed;
|
2013-04-24 22:24:12 +02:00
|
|
|
extern bool kvm_halt_in_kernel_allowed;
|
2014-10-31 14:38:18 +01:00
|
|
|
extern bool kvm_resamplefds_allowed;
|
2012-07-26 16:35:15 +02:00
|
|
|
extern bool kvm_msi_via_irqfd_allowed;
|
2012-07-26 16:35:16 +02:00
|
|
|
extern bool kvm_gsi_routing_allowed;
|
2013-09-03 10:08:25 +02:00
|
|
|
extern bool kvm_gsi_direct_mapping;
|
2013-05-29 10:27:25 +02:00
|
|
|
extern bool kvm_readonly_mem_allowed;
|
2016-10-04 14:28:09 +02:00
|
|
|
extern bool kvm_msi_use_devid;
|
2010-04-19 20:59:30 +02:00
|
|
|
|
2012-01-31 19:17:52 +01:00
|
|
|
#define kvm_enabled() (kvm_allowed)
|
2012-07-26 16:35:17 +02:00
|
|
|
/**
|
|
|
|
* kvm_irqchip_in_kernel:
|
|
|
|
*
|
2020-09-22 22:36:12 +02:00
|
|
|
* Returns: true if an in-kernel irqchip was created.
|
2012-07-26 16:35:17 +02:00
|
|
|
* What this actually means is architecture and machine model
|
2020-09-22 22:36:12 +02:00
|
|
|
* specific: on PC, for instance, it means that the LAPIC
|
|
|
|
* is in kernel. This function should never be used from generic
|
|
|
|
* target-independent code: use one of the following functions or
|
|
|
|
* some other specific check instead.
|
2012-07-26 16:35:17 +02:00
|
|
|
*/
|
2012-01-31 19:17:52 +01:00
|
|
|
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
|
2012-07-26 16:35:11 +02:00
|
|
|
|
2015-11-16 19:03:06 +01:00
|
|
|
/**
|
|
|
|
* kvm_irqchip_is_split:
|
|
|
|
*
|
2020-09-22 22:36:12 +02:00
|
|
|
* Returns: true if the irqchip implementation is split between
|
|
|
|
* user and kernel space. The details are architecture and
|
|
|
|
* machine specific. On PC, it means that the PIC, IOAPIC, and
|
|
|
|
* PIT are in user space while the LAPIC is in the kernel.
|
2015-11-16 19:03:06 +01:00
|
|
|
*/
|
|
|
|
#define kvm_irqchip_is_split() (kvm_split_irqchip)
|
|
|
|
|
2012-07-26 16:35:11 +02:00
|
|
|
/**
|
|
|
|
* kvm_async_interrupts_enabled:
|
|
|
|
*
|
|
|
|
* Returns: true if we can deliver interrupts to KVM
|
|
|
|
* asynchronously (ie by ioctl from any thread at any time)
|
|
|
|
* rather than having to do interrupt delivery synchronously
|
|
|
|
* (where the vcpu must be stopped at a suitable point first).
|
|
|
|
*/
|
|
|
|
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
|
|
|
|
|
2013-04-24 22:24:12 +02:00
|
|
|
/**
|
|
|
|
* kvm_halt_in_kernel
|
|
|
|
*
|
|
|
|
* Returns: true if halted cpus should still get a KVM_RUN ioctl to run
|
|
|
|
* inside of kernel space. This only works if MP state is implemented.
|
|
|
|
*/
|
|
|
|
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)
|
|
|
|
|
2012-07-26 16:35:14 +02:00
|
|
|
/**
|
|
|
|
* kvm_irqfds_enabled:
|
|
|
|
*
|
|
|
|
* Returns: true if we can use irqfds to inject interrupts into
|
|
|
|
* a KVM CPU (ie the kernel supports irqfds and we are running
|
|
|
|
* with a configuration where it is meaningful to use them).
|
2023-10-17 13:34:50 +02:00
|
|
|
*
|
|
|
|
* Always available if running with in-kernel irqchip.
|
2012-07-26 16:35:14 +02:00
|
|
|
*/
|
2023-10-17 13:34:50 +02:00
|
|
|
#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()
|
2012-07-26 16:35:14 +02:00
|
|
|
|
2014-10-31 14:38:18 +01:00
|
|
|
/**
|
|
|
|
* kvm_resamplefds_enabled:
|
|
|
|
*
|
|
|
|
* Returns: true if we can use resamplefds to inject interrupts into
|
|
|
|
* a KVM CPU (ie the kernel supports resamplefds and we are running
|
|
|
|
* with a configuration where it is meaningful to use them).
|
|
|
|
*/
|
|
|
|
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)
|
|
|
|
|
2012-07-26 16:35:15 +02:00
|
|
|
/**
|
|
|
|
* kvm_msi_via_irqfd_enabled:
|
|
|
|
*
|
|
|
|
* Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
|
|
|
|
* to a KVM CPU via an irqfd. This requires that the kernel supports
|
|
|
|
* this and that we're running in a configuration that permits it.
|
|
|
|
*/
|
|
|
|
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
|
|
|
|
|
2012-07-26 16:35:16 +02:00
|
|
|
/**
|
|
|
|
* kvm_gsi_routing_enabled:
|
|
|
|
*
|
|
|
|
* Returns: true if GSI routing is enabled (ie the kernel supports
|
|
|
|
* it and we're running in a configuration that permits it).
|
|
|
|
*/
|
|
|
|
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)
|
|
|
|
|
2013-09-03 10:08:25 +02:00
|
|
|
/**
|
|
|
|
* kvm_gsi_direct_mapping:
|
|
|
|
*
|
|
|
|
* Returns: true if GSI direct mapping is enabled.
|
|
|
|
*/
|
|
|
|
#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)
|
|
|
|
|
2013-05-29 10:27:25 +02:00
|
|
|
/**
|
|
|
|
* kvm_readonly_mem_enabled:
|
|
|
|
*
|
|
|
|
* Returns: true if KVM readonly memory is enabled (ie the kernel
|
|
|
|
* supports it and we're running in a configuration that permits it).
|
|
|
|
*/
|
|
|
|
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)
|
|
|
|
|
2016-10-04 14:28:09 +02:00
|
|
|
/**
|
|
|
|
* kvm_msi_devid_required:
|
|
|
|
* Returns: true if KVM requires a device id to be provided while
|
|
|
|
* defining an MSI routing entry.
|
|
|
|
*/
|
|
|
|
#define kvm_msi_devid_required() (kvm_msi_use_devid)
|
|
|
|
|
2008-11-05 17:29:27 +01:00
|
|
|
#else
|
2017-06-26 07:22:54 +02:00
|
|
|
|
2012-01-31 19:17:52 +01:00
|
|
|
#define kvm_enabled() (0)
|
|
|
|
#define kvm_irqchip_in_kernel() (false)
|
2015-12-17 17:16:08 +01:00
|
|
|
#define kvm_irqchip_is_split() (false)
|
2012-07-26 16:35:11 +02:00
|
|
|
#define kvm_async_interrupts_enabled() (false)
|
2013-04-24 22:24:12 +02:00
|
|
|
#define kvm_halt_in_kernel() (false)
|
2023-10-21 17:09:46 +02:00
|
|
|
#define kvm_irqfds_enabled() (false)
|
2015-07-06 20:15:14 +02:00
|
|
|
#define kvm_resamplefds_enabled() (false)
|
2012-07-26 16:35:15 +02:00
|
|
|
#define kvm_msi_via_irqfd_enabled() (false)
|
2012-07-26 16:35:16 +02:00
|
|
|
#define kvm_gsi_routing_allowed() (false)
|
2013-09-03 10:08:25 +02:00
|
|
|
#define kvm_gsi_direct_mapping() (false)
|
2013-05-29 10:27:25 +02:00
|
|
|
#define kvm_readonly_mem_enabled() (false)
|
2016-10-04 14:28:09 +02:00
|
|
|
#define kvm_msi_devid_required() (false)
|
2017-06-26 07:22:54 +02:00
|
|
|
|
|
|
|
#endif /* CONFIG_KVM_IS_POSSIBLE */
|
2008-11-05 17:29:27 +01:00
|
|
|
|
|
|
|
struct kvm_run;
|
2015-01-09 09:04:40 +01:00
|
|
|
struct kvm_irq_routing_entry;
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2011-01-21 21:48:17 +01:00
|
|
|
/*
 * KVMCapabilityInfo: pairs a printable name with a KVM_CAP_* constant so
 * that required-capability tables (see kvm_arch_required_capabilities)
 * can report a missing capability by name.
 */
typedef struct KVMCapabilityInfo {
    const char *name;   /* printable capability name, e.g. "KVM_CAP_FOO" */
    int value;          /* the KVM_CAP_* constant to probe */
} KVMCapabilityInfo;

/* Build a table entry from a KVM_CAP_* suffix, stringifying the name. */
#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
/* Sentinel terminating a KVMCapabilityInfo table. */
#define KVM_CAP_LAST_INFO { NULL, 0 }
|
|
|
|
|
2012-05-17 15:32:33 +02:00
|
|
|
struct KVMState;
|
2020-08-25 21:20:37 +02:00
|
|
|
|
|
|
|
#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
|
2012-05-17 15:32:33 +02:00
|
|
|
typedef struct KVMState KVMState;
|
2020-08-31 23:07:33 +02:00
|
|
|
DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
|
|
|
|
TYPE_KVM_ACCEL)
|
2020-08-25 21:20:37 +02:00
|
|
|
|
2012-05-17 15:32:33 +02:00
|
|
|
extern KVMState *kvm_state;
|
2019-10-17 03:12:35 +02:00
|
|
|
typedef struct Notifier Notifier;
|
2012-05-17 15:32:33 +02:00
|
|
|
|
2022-02-22 15:11:15 +01:00
|
|
|
/*
 * KVMRouteChange: tracks a batch of IRQ routing table updates so several
 * route additions can be committed to the kernel in a single pass (see
 * kvm_irqchip_begin_route_changes / kvm_irqchip_commit_route_changes).
 */
typedef struct KVMRouteChange {
    KVMState *s;    /* KVM state the pending changes apply to */
    int changes;    /* count of pending, not-yet-committed route changes */
} KVMRouteChange;
|
|
|
|
|
2008-11-05 17:29:27 +01:00
|
|
|
/* external API */
|
|
|
|
|
2023-09-26 20:57:30 +02:00
|
|
|
unsigned int kvm_get_max_memslots(void);
|
2023-09-26 20:57:24 +02:00
|
|
|
unsigned int kvm_get_free_memslots(void);
|
kvm: check KVM_CAP_SYNC_MMU with kvm_vm_check_extension()
On a server-class ppc host, this capability depends on the KVM type,
ie, HV or PR. If both KVM are present in the kernel, we will always
get the HV specific value, even if we explicitely requested PR on
the command line.
This can have an impact if we're using hugepages or a balloon device.
Since we've already created the VM at the time any user calls
kvm_has_sync_mmu(), switching to kvm_vm_check_extension() is
enough to fix any potential issue.
It is okay for the other archs that also implement KVM_CAP_SYNC_MMU,
ie, mips, s390, x86 and arm, because they don't depend on the VM being
created or not.
While here, let's cache the state of this extension in a bool variable,
since it has several users in the code, as suggested by Thomas Huth.
Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <150600965332.30533.14702405809647835716.stgit@bahia.lan>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-09-21 18:00:53 +02:00
|
|
|
bool kvm_has_sync_mmu(void);
|
2010-04-01 19:57:11 +02:00
|
|
|
int kvm_has_vcpu_events(void);
|
2019-06-19 18:21:38 +02:00
|
|
|
int kvm_max_nested_state_length(void);
|
2011-10-15 11:49:47 +02:00
|
|
|
int kvm_has_gsi_routing(void);
|
2010-04-01 19:57:11 +02:00
|
|
|
|
2017-07-11 12:21:26 +02:00
|
|
|
/**
|
|
|
|
* kvm_arm_supports_user_irq
|
|
|
|
*
|
|
|
|
* Not all KVM implementations support notifications for kernel generated
|
|
|
|
* interrupt events to user space. This function indicates whether the current
|
|
|
|
* KVM implementation does support them.
|
|
|
|
*
|
|
|
|
* Returns: true if KVM supports using kernel generated IRQs from user space
|
|
|
|
*/
|
|
|
|
bool kvm_arm_supports_user_irq(void);
|
|
|
|
|
2018-03-08 13:48:44 +01:00
|
|
|
|
2021-05-16 19:01:46 +02:00
|
|
|
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
|
|
|
|
int kvm_on_sigbus(int code, void *addr);
|
|
|
|
|
2012-12-17 06:38:45 +01:00
|
|
|
#ifdef NEED_CPU_H
|
2016-03-15 16:58:45 +01:00
|
|
|
#include "cpu.h"
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2010-01-26 12:21:16 +01:00
|
|
|
void kvm_flush_coalesced_mmio_buffer(void);
|
2008-12-09 21:09:57 +01:00
|
|
|
|
2022-09-29 13:42:26 +02:00
|
|
|
/**
|
|
|
|
* kvm_update_guest_debug(): ensure KVM debug structures updated
|
|
|
|
* @cs: the CPUState for this cpu
|
|
|
|
* @reinject_trap: KVM trap injection control
|
|
|
|
*
|
|
|
|
* There are usually per-arch specifics which will be handled by
|
|
|
|
* calling down to kvm_arch_update_guest_debug after the generic
|
|
|
|
* fields have been set.
|
|
|
|
*/
|
|
|
|
#ifdef KVM_CAP_SET_GUEST_DEBUG
|
2013-07-25 20:50:21 +02:00
|
|
|
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
|
2022-09-29 13:42:26 +02:00
|
|
|
#else
|
|
|
|
static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    /*
     * Fallback when KVM_CAP_SET_GUEST_DEBUG is not defined: guest
     * debugging is unavailable, so always report failure.
     */
    return -EINVAL;
}
|
|
|
|
#endif
|
2009-03-12 21:12:48 +01:00
|
|
|
|
2008-11-05 17:29:27 +01:00
|
|
|
/* internal API */
|
|
|
|
|
2008-11-13 20:21:00 +01:00
|
|
|
int kvm_ioctl(KVMState *s, int type, ...);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2008-11-13 20:21:00 +01:00
|
|
|
int kvm_vm_ioctl(KVMState *s, int type, ...);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2012-10-31 06:06:49 +01:00
|
|
|
int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2014-02-26 18:20:00 +01:00
|
|
|
/**
|
|
|
|
* kvm_device_ioctl - call an ioctl on a kvm device
|
|
|
|
* @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE
|
|
|
|
* @type: The device-ctrl ioctl number
|
|
|
|
*
|
|
|
|
* Returns: -errno on error, nonnegative on success
|
|
|
|
*/
|
|
|
|
int kvm_device_ioctl(int fd, int type, ...);
|
|
|
|
|
2015-03-12 13:53:49 +01:00
|
|
|
/**
|
|
|
|
* kvm_vm_check_attr - check for existence of a specific vm attribute
|
|
|
|
* @s: The KVMState pointer
|
|
|
|
* @group: the group
|
|
|
|
* @attr: the attribute of that group to query for
|
|
|
|
*
|
|
|
|
* Returns: 1 if the attribute exists
|
|
|
|
* 0 if the attribute either does not exist or if the vm device
|
|
|
|
* interface is unavailable
|
|
|
|
*/
|
|
|
|
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);
|
|
|
|
|
2015-09-24 02:29:36 +02:00
|
|
|
/**
|
|
|
|
* kvm_device_check_attr - check for existence of a specific device attribute
|
|
|
|
* @fd: The device file descriptor
|
|
|
|
* @group: the group
|
|
|
|
* @attr: the attribute of that group to query for
|
|
|
|
*
|
|
|
|
* Returns: 1 if the attribute exists
|
|
|
|
* 0 if the attribute either does not exist or if the vm device
|
|
|
|
* interface is unavailable
|
|
|
|
*/
|
|
|
|
int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);
|
|
|
|
|
|
|
|
/**
|
2019-09-12 15:57:26 +02:00
|
|
|
* kvm_device_access - set or get value of a specific device attribute
|
2015-09-24 02:29:36 +02:00
|
|
|
* @fd: The device file descriptor
|
|
|
|
* @group: the group
|
|
|
|
* @attr: the attribute of that group to set or get
|
|
|
|
* @val: pointer to a storage area for the value
|
|
|
|
* @write: true for set and false for get operation
|
2017-06-13 15:57:00 +02:00
|
|
|
* @errp: error object handle
|
2015-09-24 02:29:36 +02:00
|
|
|
*
|
2017-06-13 15:57:00 +02:00
|
|
|
* Returns: 0 on success
|
|
|
|
* < 0 on error
|
|
|
|
* Use kvm_device_check_attr() in order to check for the availability
|
|
|
|
* of optional attributes.
|
2015-09-24 02:29:36 +02:00
|
|
|
*/
|
2017-06-13 15:57:00 +02:00
|
|
|
int kvm_device_access(int fd, int group, uint64_t attr,
|
|
|
|
void *val, bool write, Error **errp);
|
2015-09-24 02:29:36 +02:00
|
|
|
|
2014-02-26 18:20:00 +01:00
|
|
|
/**
|
|
|
|
* kvm_create_device - create a KVM device for the device control API
|
|
|
|
* @KVMState: The KVMState pointer
|
|
|
|
* @type: The KVM device type (see Documentation/virtual/kvm/devices in the
|
|
|
|
* kernel source)
|
|
|
|
* @test: If true, only test if device can be created, but don't actually
|
|
|
|
* create the device.
|
|
|
|
*
|
|
|
|
* Returns: -errno on error, nonnegative on success: @test ? 0 : device fd;
|
|
|
|
*/
|
|
|
|
int kvm_create_device(KVMState *s, uint64_t type, bool test);
|
|
|
|
|
2016-03-30 18:27:24 +02:00
|
|
|
/**
|
|
|
|
* kvm_device_supported - probe whether KVM supports specific device
|
|
|
|
*
|
|
|
|
* @vmfd: The fd handler for VM
|
|
|
|
* @type: type of device
|
|
|
|
*
|
|
|
|
* @return: true if supported, otherwise false.
|
|
|
|
*/
|
|
|
|
bool kvm_device_supported(int vmfd, uint64_t type);
|
2014-02-26 18:20:00 +01:00
|
|
|
|
2008-11-05 17:29:27 +01:00
|
|
|
/* Arch specific hooks */
|
|
|
|
|
2011-01-21 21:48:17 +01:00
|
|
|
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
|
|
|
|
|
2022-09-29 09:20:12 +02:00
|
|
|
void kvm_arch_accel_class_init(ObjectClass *oc);
|
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
|
2015-04-08 13:30:58 +02:00
|
|
|
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
int kvm_arch_process_async_events(CPUState *cpu);
|
2010-05-04 14:45:27 +02:00
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
int kvm_arch_get_registers(CPUState *cpu);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2010-03-01 19:10:30 +01:00
|
|
|
/* state subset only touched by the VCPU itself during runtime */
|
|
|
|
#define KVM_PUT_RUNTIME_STATE 1
|
|
|
|
/* state subset modified during VCPU reset */
|
|
|
|
#define KVM_PUT_RESET_STATE 2
|
|
|
|
/* full state set, modified during initialization or on vmload */
|
|
|
|
#define KVM_PUT_FULL_STATE 3
|
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
int kvm_arch_put_registers(CPUState *cpu, int level);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2023-08-22 18:31:02 +02:00
|
|
|
int kvm_arch_get_default_type(MachineState *ms);
|
|
|
|
|
2015-02-04 16:43:51 +01:00
|
|
|
int kvm_arch_init(MachineState *ms, KVMState *s);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
int kvm_arch_init_vcpu(CPUState *cpu);
|
2019-06-19 18:21:32 +02:00
|
|
|
int kvm_arch_destroy_vcpu(CPUState *cpu);
|
2008-11-05 17:29:27 +01:00
|
|
|
|
2016-04-26 15:41:04 +02:00
|
|
|
bool kvm_vcpu_id_is_valid(int vcpu_id);
|
|
|
|
|
2013-01-22 21:25:01 +01:00
|
|
|
/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
|
|
|
|
unsigned long kvm_arch_vcpu_id(CPUState *cpu);
|
|
|
|
|
target-arm: kvm64: handle SIGBUS signal from kernel or KVM
Add a SIGBUS signal handler. In this handler, it checks the SIGBUS type,
translates the host VA delivered by host to guest PA, then fills this PA
to guest APEI GHES memory, then notifies guest according to the SIGBUS
type.
When guest accesses the poisoned memory, it will generate a Synchronous
External Abort(SEA). Then host kernel gets an APEI notification and calls
memory_failure() to unmapped the affected page in stage 2, finally
returns to guest.
Guest continues to access the PG_hwpoison page, it will trap to KVM as
stage2 fault, then a SIGBUS_MCEERR_AR synchronous signal is delivered to
Qemu, Qemu records this error address into guest APEI GHES memory and
notifes guest using Synchronous-External-Abort(SEA).
In order to inject a vSEA, we introduce the kvm_inject_arm_sea() function
in which we can setup the type of exception and the syndrome information.
When switching to guest, the target vcpu will jump to the synchronous
external abort vector table entry.
The ESR_ELx.DFSC is set to synchronous external abort(0x10), and the
ESR_ELx.FnV is set to not valid(0x1), which will tell guest that FAR is
not valid and hold an UNKNOWN value. These values will be set to KVM
register structures through KVM_SET_ONE_REG IOCTL.
Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
Signed-off-by: Xiang Zheng <zhengxiang9@huawei.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Xiang Zheng <zhengxiang9@huawei.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Message-id: 20200512030609.19593-10-gengdongjiu@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2020-05-12 05:06:08 +02:00
|
|
|
#ifdef KVM_HAVE_MCE_INJECTION
|
2017-02-08 12:48:54 +01:00
|
|
|
void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
|
|
|
|
#endif
|
2010-10-11 20:31:21 +02:00
|
|
|
|
2011-10-15 11:49:47 +02:00
|
|
|
void kvm_arch_init_irq_routing(KVMState *s);
|
|
|
|
|
2015-01-09 09:04:40 +01:00
|
|
|
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
|
2015-10-15 15:44:52 +02:00
|
|
|
uint64_t address, uint32_t data, PCIDevice *dev);
|
2015-01-09 09:04:40 +01:00
|
|
|
|
2016-07-14 07:56:31 +02:00
|
|
|
/* Notify arch about newly added MSI routes */
|
|
|
|
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
|
|
|
|
int vector, PCIDevice *dev);
|
|
|
|
/* Notify arch about released MSI routes */
|
|
|
|
int kvm_arch_release_virq_post(int virq);
|
|
|
|
|
2015-06-02 15:56:23 +02:00
|
|
|
int kvm_arch_msi_data_to_gsi(uint32_t data);
|
|
|
|
|
2012-07-26 16:35:12 +02:00
|
|
|
int kvm_set_irq(KVMState *s, int irq, int level);
|
2012-05-16 20:41:10 +02:00
|
|
|
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
|
2011-10-15 11:49:47 +02:00
|
|
|
|
2012-05-17 15:32:32 +02:00
|
|
|
void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
|
2011-10-15 11:49:47 +02:00
|
|
|
|
2019-10-17 03:12:35 +02:00
|
|
|
void kvm_irqchip_add_change_notifier(Notifier *n);
|
|
|
|
void kvm_irqchip_remove_change_notifier(Notifier *n);
|
|
|
|
void kvm_irqchip_change_notify(void);
|
|
|
|
|
2009-03-12 21:12:48 +01:00
|
|
|
struct kvm_guest_debug;
|
|
|
|
struct kvm_debug_exit_arch;
|
|
|
|
|
|
|
|
/*
 * kvm_sw_breakpoint: one software breakpoint managed by the KVM debug
 * support, keyed by guest PC and kept on a per-CPU tail queue.
 */
struct kvm_sw_breakpoint {
    vaddr pc;           /* guest address the breakpoint is set at */
    vaddr saved_insn;   /* presumably the original instruction replaced by
                           the trap — arch code restores it on removal
                           (see kvm_arch_remove_sw_breakpoint); confirm */
    int use_count;      /* reference count; multiple inserts at one PC */
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;  /* breakpoint list linkage */
};
|
|
|
|
|
2012-12-01 05:35:08 +01:00
|
|
|
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
|
2023-08-07 17:56:58 +02:00
|
|
|
vaddr pc);
|
2009-03-12 21:12:48 +01:00
|
|
|
|
2012-12-01 05:35:08 +01:00
|
|
|
int kvm_sw_breakpoints_active(CPUState *cpu);
|
2009-03-12 21:12:48 +01:00
|
|
|
|
2013-06-19 17:37:31 +02:00
|
|
|
int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
|
2009-03-12 21:12:48 +01:00
|
|
|
struct kvm_sw_breakpoint *bp);
|
2013-06-19 17:37:31 +02:00
|
|
|
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
|
2009-03-12 21:12:48 +01:00
|
|
|
struct kvm_sw_breakpoint *bp);
|
2023-08-07 17:57:00 +02:00
|
|
|
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
|
|
|
|
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
|
2009-03-12 21:12:48 +01:00
|
|
|
void kvm_arch_remove_all_hw_breakpoints(void);
|
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
|
2009-03-12 21:12:48 +01:00
|
|
|
|
2012-10-31 06:57:49 +01:00
|
|
|
bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
|
2010-05-10 10:21:34 +02:00
|
|
|
|
2009-05-08 22:33:24 +02:00
|
|
|
int kvm_check_extension(KVMState *s, unsigned int extension);
|
|
|
|
|
2014-07-14 19:15:15 +02:00
|
|
|
int kvm_vm_check_extension(KVMState *s, unsigned int extension);
|
|
|
|
|
2013-10-23 18:19:26 +02:00
|
|
|
/*
 * kvm_vm_enable_cap - enable a KVM capability on the VM
 * @s: the KVMState pointer
 * @capability: the KVM_CAP_* constant to enable
 * @cap_flags: flags for struct kvm_enable_cap
 * @...: up to ARRAY_SIZE(cap.args) u64 arguments; extras are dropped
 *
 * Statement-expression macro; evaluates to the KVM_ENABLE_CAP ioctl
 * result (negative errno on failure). Note the arguments may be
 * referenced more than once — avoid side effects in them.
 */
#define kvm_vm_enable_cap(s, capability, cap_flags, ...) \
    ({ \
        struct kvm_enable_cap cap = { \
            .cap = capability, \
            .flags = cap_flags, \
        }; \
        uint64_t args_tmp[] = { __VA_ARGS__ }; \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \
        kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap); \
    })
|
|
|
|
|
|
|
|
/*
 * kvm_vcpu_enable_cap - enable a KVM capability on a single vCPU
 * @cpu: the CPUState whose vCPU fd receives the ioctl
 * @capability: the KVM_CAP_* constant to enable
 * @cap_flags: flags for struct kvm_enable_cap
 * @...: up to ARRAY_SIZE(cap.args) u64 arguments; extras are dropped
 *
 * Per-vCPU counterpart of kvm_vm_enable_cap(); evaluates to the
 * KVM_ENABLE_CAP ioctl result (negative errno on failure).
 */
#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...) \
    ({ \
        struct kvm_enable_cap cap = { \
            .cap = capability, \
            .flags = cap_flags, \
        }; \
        uint64_t args_tmp[] = { __VA_ARGS__ }; \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \
        kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \
    })
|
|
|
|
|
2014-06-18 00:10:31 +02:00
|
|
|
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
|
|
|
|
|
2013-04-23 10:29:36 +02:00
|
|
|
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
|
|
|
|
hwaddr *phys_addr);
|
|
|
|
|
|
|
|
#endif /* NEED_CPU_H */
|
|
|
|
|
2013-08-27 13:19:10 +02:00
|
|
|
void kvm_cpu_synchronize_state(CPUState *cpu);
|
2013-04-23 10:29:36 +02:00
|
|
|
|
2017-02-09 09:41:14 +01:00
|
|
|
void kvm_init_cpu_signals(CPUState *cpu);
|
|
|
|
|
2016-07-14 07:56:30 +02:00
|
|
|
/**
|
|
|
|
* kvm_irqchip_add_msi_route - Add MSI route for specific vector
|
2022-02-22 15:11:16 +01:00
|
|
|
* @c: KVMRouteChange instance.
|
2016-07-14 07:56:30 +02:00
|
|
|
* @vector: which vector to add. This can be either MSI/MSIX
|
|
|
|
* vector. The function will automatically detect whether
|
|
|
|
* MSI/MSIX is enabled, and fetch corresponding MSI
|
|
|
|
* message.
|
|
|
|
* @dev: Owner PCI device to add the route. If @dev is specified
|
|
|
|
* as @NULL, an empty MSI message will be inited.
|
|
|
|
* @return: virq (>=0) when success, errno (<0) when failed.
|
|
|
|
*/
|
2022-02-22 15:11:16 +01:00
|
|
|
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
|
2015-10-15 15:44:52 +02:00
|
|
|
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
|
|
|
|
PCIDevice *dev);
|
2016-07-14 07:56:33 +02:00
|
|
|
void kvm_irqchip_commit_routes(KVMState *s);
|
2022-02-22 15:11:15 +01:00
|
|
|
|
|
|
|
/*
 * kvm_irqchip_begin_route_changes - start a batch of IRQ route updates.
 * @s: the KVMState pointer
 *
 * Returns a KVMRouteChange tracker bound to @s with no pending changes;
 * pass it to kvm_irqchip_commit_route_changes() to flush the batch.
 */
static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
{
    KVMRouteChange rc;

    rc.s = s;
    rc.changes = 0;
    return rc;
}
|
|
|
|
|
|
|
|
/*
 * kvm_irqchip_commit_route_changes - flush a batch of IRQ route updates.
 * @c: tracker obtained from kvm_irqchip_begin_route_changes()
 *
 * Commits the routing table to the kernel only if something changed
 * since the batch began, then resets the pending-change counter.
 */
static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
{
    if (!c->changes) {
        return;
    }
    kvm_irqchip_commit_routes(c->s);
    c->changes = 0;
}
|
|
|
|
|
2012-05-17 15:32:34 +02:00
|
|
|
void kvm_irqchip_release_virq(KVMState *s, int virq);
|
2012-05-17 15:32:36 +02:00
|
|
|
|
2013-07-15 17:45:03 +02:00
|
|
|
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter);
|
2015-11-10 13:52:42 +01:00
|
|
|
int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint);
|
2013-07-15 17:45:03 +02:00
|
|
|
|
2015-07-06 20:15:13 +02:00
|
|
|
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
|
|
|
EventNotifier *rn, int virq);
|
|
|
|
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
|
|
|
int virq);
|
2015-07-06 20:15:13 +02:00
|
|
|
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
|
|
|
|
EventNotifier *rn, qemu_irq irq);
|
|
|
|
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
|
|
|
|
qemu_irq irq);
|
|
|
|
void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
|
2013-04-16 15:58:13 +02:00
|
|
|
void kvm_init_irq_routing(KVMState *s);
|
2014-02-26 18:20:00 +01:00
|
|
|
|
2019-11-13 11:17:12 +01:00
|
|
|
bool kvm_kernel_irqchip_allowed(void);
|
|
|
|
bool kvm_kernel_irqchip_required(void);
|
|
|
|
bool kvm_kernel_irqchip_split(void);
|
|
|
|
|
2014-02-26 18:20:00 +01:00
|
|
|
/**
|
|
|
|
* kvm_arch_irqchip_create:
|
|
|
|
* @KVMState: The KVMState pointer
|
|
|
|
*
|
|
|
|
* Allow architectures to create an in-kernel irq chip themselves.
|
|
|
|
*
|
|
|
|
* Returns: < 0: error
|
|
|
|
* 0: irq chip was not created
|
|
|
|
* > 0: irq chip was created
|
|
|
|
*/
|
2019-11-13 11:17:12 +01:00
|
|
|
int kvm_arch_irqchip_create(KVMState *s);
|
2014-05-09 10:06:46 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
|
|
|
|
* @id: The register ID
|
|
|
|
* @source: The pointer to the value to be set. It must point to a variable
|
|
|
|
* of the correct type/size for the register being accessed.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, or a negative errno on failure.
|
|
|
|
*/
|
|
|
|
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl
|
|
|
|
* @id: The register ID
|
|
|
|
* @target: The pointer where the value is to be stored. It must point to a
|
|
|
|
* variable of the correct type/size for the register being accessed.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, or a negative errno on failure.
|
|
|
|
*/
|
|
|
|
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
|
2020-03-18 15:52:03 +01:00
|
|
|
|
|
|
|
/* Notify resamplefd for EOI of specific interrupts. */
|
|
|
|
void kvm_resample_fd_notify(int gsi);
|
|
|
|
|
2021-01-26 18:36:47 +01:00
|
|
|
/**
|
|
|
|
* kvm_cpu_check_are_resettable - return whether CPUs can be reset
|
|
|
|
*
|
|
|
|
* Returns: true: CPUs are resettable
|
|
|
|
* false: CPUs are not resettable
|
|
|
|
*/
|
|
|
|
bool kvm_cpu_check_are_resettable(void);
|
|
|
|
|
|
|
|
bool kvm_arch_cpu_check_are_resettable(void);
|
|
|
|
|
2021-06-29 18:01:18 +02:00
|
|
|
bool kvm_dirty_ring_enabled(void);
|
2022-06-25 19:38:34 +02:00
|
|
|
|
|
|
|
uint32_t kvm_dirty_ring_size(void);
|
2024-01-30 20:06:40 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
|
|
|
|
* reported for the VM.
|
|
|
|
*/
|
|
|
|
bool kvm_hwpoisoned_mem(void);
|
2008-11-05 17:29:27 +01:00
|
|
|
#endif
|