/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static bool cap_has_inject_ext_dabt;

static ARMHostCPUFeatures arm_host_cpu_features;
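
/*
 * Initialize (or reinitialize) the VCPU by invoking the
 * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature flags
 * specified in the CPU's kvm_target and kvm_init_features fields.
 */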
int kvm_arm_vcpu_init(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_vcpu_init init;

    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));

    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}
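
/*
 * Finalize the configuration of the given VCPU feature by invoking
 * the KVM_ARM_VCPU_FINALIZE ioctl. Some features (e.g. SVE, per the
 * KVM API documentation) must be finalized before the vcpu can be
 * fully used.
 */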
int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
{
    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
}
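
/*
 * Probe whether the host kernel can inject an SError with a
 * caller-specified syndrome value (ESR), and record the result for
 * later use by kvm_put_vcpu_events().
 */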
void kvm_arm_init_serror_injection(CPUState *cs)
{
    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
                                    KVM_CAP_ARM_INJECT_SERROR_ESR);
}
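
/*
 * Create a scratch VM with a single VCPU for probing host capabilities:
 * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
 * QEMU_KVM_ARM_TARGET_NONE) to try as a fallback if the kernel does
 * not support KVM_ARM_PREFERRED_TARGET
 * @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
 * @init: filled in with the values used to init the vcpu; if NULL, the
 * vcpu is created but not initialized
 *
 * Returns true on success; on failure all file descriptors are closed
 * and false is returned.
 */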
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
    int max_vm_pa_size;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
    if (max_vm_pa_size < 0) {
        max_vm_pa_size = 0;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    if (init->target == -1) {
        struct kvm_vcpu_init preferred;

        ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
        if (!ret) {
            init->target = preferred.target;
        }
    }
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        struct kvm_vcpu_init try;

        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            try.target = *cpus_to_try++;
            memcpy(try.features, init->features, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
        init->target = try.target;
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
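
/*
 * Tear down the scratch VM set up by kvm_arm_create_scratch_host_vcpu,
 * closing the file descriptors in the reverse of their creation order.
 */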
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}
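
/*
 * Set up the ARMCPU struct fields of a "host" CPU model from the
 * features probed on the host via a scratch VM. The probe result is
 * cached in arm_host_cpu_features, so only the first call queries the
 * kernel; on failure the CPU is flagged so that arm_cpu_realizefn()
 * can report the error later.
 */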
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (!arm_host_cpu_features.dtb_compatible) {
        if (!kvm_enabled() ||
            !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /* We can't report this error yet, so flag that we need to
             * report it in arm_cpu_realizefn().
             */
            cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;
}
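
/*
 * QOM accessors for the "kvm-no-adjvtime" and "kvm-steal-time" CPU
 * properties. Note the inverted sense of "kvm-no-adjvtime": the
 * property value is the negation of the kvm_adjvtime field.
 */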
static bool kvm_no_adjvtime_get(Object *obj, Error **errp)
{
    return !ARM_CPU(obj)->kvm_adjvtime;
}

static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_adjvtime = !value;
}

static bool kvm_steal_time_get(Object *obj, Error **errp)
{
    return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
}

static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}

/* KVM VCPU properties should be prefixed with "kvm-". */
void kvm_arm_add_vcpu_properties(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        cpu->kvm_adjvtime = true;
        object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
                                 kvm_no_adjvtime_set);
        object_property_set_description(obj, "kvm-no-adjvtime",
                                        "Set on to disable the adjustment of "
                                        "the virtual counter. VM stopped time "
                                        "will be counted.");
    }

    cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
    object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get,
                             kvm_steal_time_set);
    object_property_set_description(obj, "kvm-steal-time",
                                    "Set off to disable KVM steal time.");
}
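
/*
 * Returns true if KVM can enable the per-vcpu PMU (KVM_CAP_ARM_PMU_V3).
 * This is a property of the accelerator, not of an individual vCPU, so
 * it can be checked before any vCPU is realized.
 */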
bool kvm_arm_pmu_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3);
}
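
/*
 * kvm_arm_get_max_vm_ipa_size:
 * @ms: Machine state handle
 * @fixed_ipa: True when the IPA limit is fixed at 40, as is the case
 * for legacy kernels that only support the 40-bit default.
 *
 * Returns the number of bits in the IPA address space supported by KVM.
 */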
int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    int ret;

    ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
    *fixed_ipa = ret <= 0;

    return ret > 0 ? ret : 40;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int ret = 0;
    /* For ARM, interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    /*
     * PSCI wakes up secondary cores, so we always need to
     * have vCPUs waiting in kernel space.
     */
    kvm_halt_in_kernel_allowed = true;

    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);

    if (ms->smp.cpus > 256 &&
        !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
        error_report("Using more than 256 vcpus requires a host kernel "
                     "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
        ret = -EINVAL;
    }

    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {
        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {
            error_report("Failed to enable KVM_CAP_ARM_NISV_TO_USER cap");
        } else {
            /* Set status for supporting the external dabt injection */
            cap_has_inject_ext_dabt = kvm_check_extension(s,
                                          KVM_CAP_ARM_INJECT_EXT_DABT);
        }
    }

    return ret;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* We track all the KVM devices which need their memory addresses
 * passed to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 *
 * Sometimes the address must be OR'ed with some other fields
 * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);
 * @kda_addr_ormask stores the value of those fields.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    uint64_t kda_addr_ormask;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(, KVMDevice) kvm_devices_head;

static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}

static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}

static MemoryListener devlistener = {
    .name = "kvm-arm",
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};

static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;

    /* If the device control API is available and we have a device fd on the
     * KVMDevice struct, let's use the newer API
     */
    if (kd->dev_fd >= 0) {
        uint64_t addr = kd->kda.addr;

        addr |= kd->kda_addr_ormask;
        attr->addr = (uintptr_t)&addr;
        ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
    }

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}

static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
        g_free(kd);
    }
    memory_listener_unregister(&devlistener);
}

static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};

void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, &address_space_memory);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}

static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

/*
 * cpreg_values are sorted in ascending order by KVM register ID
 * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
 * the storage for a KVM register by ID with a binary search.
 */
static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
{
    uint64_t *res;

    res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
                  sizeof(uint64_t), compare_u64);
    assert(res);

    return &cpu->cpreg_values[res - cpu->cpreg_indexes];
}

/* Initialize the ARMCPU cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 */
int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}
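
/*
 * Read the register state out of KVM into the cpreg_values shadow
 * list. Returns false if any register could not be read, which should
 * not happen unless the kernel is inconsistent about which registers
 * exist.
 */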
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}
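
/*
 * Write the cpreg_values shadow list back into KVM, restricted to
 * registers whose migration level is at or below @level. Returns
 * false if any register could not be written.
 */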
bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}

void kvm_arm_cpu_pre_save(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_vtime_dirty) {
        *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
    }
}

void kvm_arm_cpu_post_load(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_adjvtime) {
        cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
        cpu->kvm_vtime_dirty = true;
    }
}

void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    int ret;

    /* Re-init VCPU so that all registers are set to
     * their respective reset values.
     */
    ret = kvm_arm_vcpu_init(CPU(cpu));
    if (ret < 0) {
        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
        abort();
    }
    if (!write_kvmstate_to_list(cpu)) {
        fprintf(stderr, "write_kvmstate_to_list failed\n");
        abort();
    }
    /*
     * Sync the reset values also into the CPUState. This is necessary
     * because the next thing we do will be a kvm_arch_put_registers()
     * which will update the list values from the CPUState before copying
     * the list values back to KVM. It's OK to ignore failure returns here
     * for the same reason we do so in kvm_arch_get_registers().
     */
    write_list_to_cpustate(cpu);
}

/*
 * Update KVM's MP_STATE based on what QEMU thinks it is
 */
int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = (cpu->power_state == PSCI_OFF) ?
            KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
        };
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            return -1;
        }
    }

    return 0;
}

/*
 * Sync the KVM MP_STATE into QEMU
 */
int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state;
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            abort();
        }
        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
            PSCI_OFF : PSCI_ON;
    }

    return 0;
}
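
/*
 * Read the virtual counter (KVM_REG_ARM_TIMER_CNT) out of KVM and
 * cache it in cpu->kvm_vtime. Called when the VM stops, so that the
 * counter can be restored on resume and guest virtual time does not
 * advance across the stopped period (the kvm-adjvtime behaviour).
 */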
void kvm_arm_get_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM_TIMER_CNT,
        .addr = (uintptr_t)&cpu->kvm_vtime,
    };
    int ret;

    if (cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = true;
}
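
/*
 * Write the cached virtual counter value back into KVM when the VM
 * resumes, if a value was saved while it was stopped.
 */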
void kvm_arm_put_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM_TIMER_CNT,
        .addr = (uintptr_t)&cpu->kvm_vtime,
    };
    int ret;

    if (!cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = false;
}
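
/*
 * Put pending SError state (and its optional syndrome) into KVM via
 * KVM_SET_VCPU_EVENTS. A no-op when the kernel lacks vcpu-events
 * support.
 */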
int kvm_put_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    events.exception.serror_pending = env->serror.pending;

    /* Inject SError to guest with specified syndrome if host kernel
     * supports it, otherwise inject SError without syndrome.
     */
    if (cap_has_inject_serror_esr) {
        events.exception.serror_has_esr = env->serror.has_esr;
        events.exception.serror_esr = env->serror.esr;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to put vcpu events");
    }

    return ret;
}

int kvm_get_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to get vcpu events");
        return ret;
    }

    env->serror.pending = events.exception.serror_pending;
    env->serror.has_esr = events.exception.serror_has_esr;
    env->serror.esr = events.exception.serror_esr;

    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (unlikely(env->ext_dabt_raised)) {
        /*
         * Verify that the external DABT has been properly injected,
         * otherwise we risk indefinitely re-running the faulting
         * instruction. This covers a very narrow case for kernels
         * 5.5..5.5.4 where the injected abort was misconfigured to be
         * an IMPLEMENTATION DEFINED exception (for 32-bit EL1).
         */
        if (!arm_feature(env, ARM_FEATURE_AARCH64) &&
            unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) {

            error_report("Data abort exception with no valid ISS generated by "
                         "guest memory access. KVM unable to emulate faulting "
                         "instruction. Failed to inject an external data abort "
                         "into the guest.");
            abort();
        }
        /* Clear the status */
        env->ext_dabt_raised = 0;
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu;
    uint32_t switched_level;

    if (kvm_irqchip_in_kernel()) {
        /*
         * We only need to sync timer states with user-space interrupt
         * controllers, so return early and save cycles if we don't.
         */
        return MEMTXATTRS_UNSPECIFIED;
    }

    cpu = ARM_CPU(cs);

    /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
    if (run->s.regs.device_irq_level != cpu->device_irq_level) {
        switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;

        qemu_mutex_lock_iothread();

        if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_VTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
        }

        if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_PTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
        }

        if (switched_level & KVM_ARM_DEV_PMU) {
            qemu_set_irq(cpu->pmu_interrupt,
                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
            switched_level &= ~KVM_ARM_DEV_PMU;
        }

        if (switched_level) {
            qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
                          __func__, switched_level);
        }

        /* We also mark unknown levels as processed to not waste cycles */
        cpu->device_irq_level = run->s.regs.device_irq_level;
        qemu_mutex_unlock_iothread();
    }

    return MEMTXATTRS_UNSPECIFIED;
}
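
/*
 * VM state-change handler for the kvm-adjvtime behaviour: cache the
 * virtual counter when the VM stops and restore it when it resumes.
 */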
void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
{
    CPUState *cs = opaque;
    ARMCPU *cpu = ARM_CPU(cs);

    if (running) {
        if (cpu->kvm_adjvtime) {
            kvm_arm_put_virtual_time(cs);
        }
    } else {
        if (cpu->kvm_adjvtime) {
            kvm_arm_get_virtual_time(cs);
        }
    }
}

/**
 * kvm_arm_handle_dabt_nisv:
 * @cs: CPUState
 * @esr_iss: ISS encoding (limited) for the exception from Data Abort
 *           ISV bit set to '0b0' -> no valid instruction syndrome
 * @fault_ipa: faulting address for the synchronous data abort
 *
 * Returns: 0 if the exception has been handled, < 0 otherwise
 */
static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
                                    uint64_t fault_ipa)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    /*
     * Request KVM to inject the external data abort into the guest
     */
    if (cap_has_inject_ext_dabt) {
        struct kvm_vcpu_events events = { };
        /*
         * The external data abort event will be handled immediately by KVM
         * using the address fault that triggered the exit on given VCPU.
         * Requesting injection of the external data abort does not rely
         * on any other VCPU state. Therefore, in this particular case, the VCPU
         * synchronization can be exceptionally skipped.
         */
        events.exception.ext_dabt_pending = 1;
        /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */
        if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) {
            env->ext_dabt_raised = 1;
            return 0;
        }
    } else {
        error_report("Data abort exception triggered by guest memory access "
                     "at physical address: 0x" TARGET_FMT_lx,
                     (target_ulong)fault_ipa);
        error_printf("KVM unable to emulate faulting instruction.\n");
    }
    return -1;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DEBUG:
        if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
            ret = EXCP_DEBUG;
        } /* otherwise return to guest */
        break;
    case KVM_EXIT_ARM_NISV:
        /* External DABT with no valid iss to decode */
        ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,
                                       run->arm_nisv.fault_ipa);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        break;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (kvm_arm_hw_debug_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_irqchip_create(KVMState *s)
{
    if (kvm_kernel_irqchip_split()) {
        perror("-machine kernel_irqchip=split is not supported on ARM.");
        exit(1);
    }

    /* If we can create the VGIC using the newer device control API, we
     * let the device do this when it initializes itself, otherwise we
     * fall back to the old API */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}
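
/*
 * Probe which in-kernel VGIC versions are available by doing test
 * creations of the VGIC v3 and v2 devices. Returns a mask of
 * KVM_ARM_VGIC_V3 and KVM_ARM_VGIC_V2 bits.
 */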
int kvm_arm_vgic_probe(void)
{
    int val = 0;

    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
        val |= KVM_ARM_VGIC_V3;
    }
    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
        val |= KVM_ARM_VGIC_V2;
    }
    return val;
}
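
/*
 * Inject an interrupt via KVM_IRQ_LINE. The vcpu index is split into
 * two bitfields so that more than 256 vcpus can be addressed
 * (KVM_ARM_IRQ_VCPU2 carries the high byte, per
 * KVM_CAP_ARM_IRQ_LINE_LAYOUT_2).
 */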
|
2015-01-09 09:04:40 +01:00
|
|
|
|
2019-10-03 17:46:39 +02:00
|
|
|
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
|
|
|
|
{
|
|
|
|
int kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) | irq;
|
|
|
|
int cpu_idx1 = cpu % 256;
|
|
|
|
int cpu_idx2 = cpu / 256;
|
|
|
|
|
|
|
|
kvm_irq |= (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) |
|
|
|
|
(cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT);
|
|
|
|
|
|
|
|
return kvm_set_irq(kvm_state, kvm_irq, !!level);
|
|
|
|
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    AddressSpace *as = pci_device_iommu_address_space(dev);
    hwaddr xlat, len, doorbell_gpa;
    MemoryRegionSection mrs;
    MemoryRegion *mr;

    if (as == &address_space_memory) {
        return 0;
    }

    /* MSI doorbell address is translated by an IOMMU */

    RCU_READ_LOCK_GUARD();

    mr = address_space_translate(as, address, &xlat, &len, true,
                                 MEMTXATTRS_UNSPECIFIED);

    if (!mr) {
        return 1;
    }

    mrs = memory_region_find(mr, xlat, 1);

    if (!mrs.mr) {
        return 1;
    }

    doorbell_gpa = mrs.offset_within_address_space;
    memory_region_unref(mrs.mr);

    route->u.msi.address_lo = doorbell_gpa;
    route->u.msi.address_hi = doorbell_gpa >> 32;

    trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);

    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return (data - 32) & 0xffff;
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}