/*
 * i386 CPUID, CPU class, definitions, models
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/qemu-print.h"
#include "qemu/hw-version.h"
#include "cpu.h"
#include "tcg/helper-tcg.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "kvm/kvm_i386.h"
#include "sev.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qapi-commands-machine-target.h"
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/i386/sgx-epc.h"
#endif

#include "disas/capstone.h"
#include "cpu-internal.h"

/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF

/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}
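
/*
 * For example, legacy_l1d_cache defined below (level 1 data cache, 32 KiB,
 * 8-way, 64-byte lines) matches table index 0x2C above, so
 * cpuid2_cache_descriptor(&legacy_l1d_cache) returns the descriptor 0x2C.
 */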

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

#define CACHE_LEVEL(l)        (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING   (1 << 0)
#define CACHE_INCLUSIVE         (1 << 1)
#define CACHE_COMPLEX_IDX       (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)

/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
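
/*
 * For example, with legacy_l1d_cache (line_size = 64, partitions = 1,
 * associativity = 8) the EBX encoding above yields
 * (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01c0003f.
 */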

/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}
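
/*
 * For example, legacy_l1d_cache_amd (64 KiB, 2-way, 1 line per tag,
 * 64-byte lines) encodes as (64 << 24) | (2 << 16) | (1 << 8) | 64
 * = 0x40020140.
 */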

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)

/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}
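
/*
 * For example, legacy_l2_cache_amd (512 KiB, 16-way, 1 line per tag,
 * 64-byte lines) gives *ecx = (512 << 16) | (AMD_ENC_ASSOC(16) << 12) |
 * (1 << 8) | 64 = 0x02008140.
 */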

/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
                                       X86CPUTopoInfo *topo_info,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_threads;
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_threads = topo_info->cores_per_die * topo_info->threads_per_core;
        *eax |= (l3_threads - 1) << 14;
    } else {
        *eax |= ((topo_info->threads_per_core - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode topology info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    X86CPUTopoIDs topo_ids;

    x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);

    *eax = cpu->apic_id;

    /*
     * CPUID_Fn8000001E_EBX [Core Identifiers] (CoreId)
     * Read-only. Reset: 0000_XXXXh.
     * See Core::X86::Cpuid::ExtApicId.
     * Core::X86::Cpuid::CoreId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits Description
     * 31:16 Reserved.
     * 15:8 ThreadsPerCore: threads per core. Read-only. Reset: XXh.
     *      The number of threads per core is ThreadsPerCore+1.
     *  7:0 CoreId: core ID. Read-only. Reset: XXh.
     *
     *  NOTE: CoreId is already part of apic_id. Just use it. We can
     *  use all the 8 bits to represent the core_id here.
     */
    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.core_id & 0xFF);

    /*
     * CPUID_Fn8000001E_ECX [Node Identifiers] (NodeId)
     * Read-only. Reset: 0000_0XXXh.
     * Core::X86::Cpuid::NodeId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits Description
     * 31:11 Reserved.
     * 10:8 NodesPerProcessor: Node per processor. Read-only. Reset: XXXb.
     *      ValidValues:
     *      Value     Description
     *      000b      1 node per processor.
     *      001b      2 nodes per processor.
     *      010b      Reserved.
     *      011b      4 nodes per processor.
     *      111b-100b Reserved.
     *  7:0 NodeId: Node ID. Read-only. Reset: XXh.
     *
     * NOTE: Hardware reserves 3 bits for number of nodes per processor.
     * But users can create more nodes than the actual hardware can
     * support. To generalize we can use all the upper 8 bits for nodes.
     * NodeId is combination of node and socket_id which is already decoded
     * in apic_id. Just use it by shifting.
     */
    *ecx = ((topo_info->dies_per_pkg - 1) << 8) |
           ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF);

    *edx = 0;
}
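
/*
 * For example, with 2 threads per core and core_id 3, the EBX value above
 * is ((2 - 1) << 8) | 3 = 0x0103.
 */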

/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
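
/*
 * Note (illustrative, assumed from the CPUID[0x80000005]/[0x80000006]
 * handling elsewhere in this file): these TLB constants are packed as
 * 8-bit associativity/entry-count fields, roughly (assoc << 24) |
 * (entries << 16) per TLB; see the 0x80000005 leaf code for the
 * authoritative layout.
 */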

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */

/* CPUID Leaf 0x1D constants: */
#define INTEL_AMX_TILE_MAX_SUBLEAF     0x1
#define INTEL_AMX_TOTAL_TILE_BYTES     0x2000
#define INTEL_AMX_BYTES_PER_TILE       0x400
#define INTEL_AMX_BYTES_PER_ROW        0x40
#define INTEL_AMX_TILE_MAX_NAMES       0x8
#define INTEL_AMX_TILE_MAX_ROWS        0x10

/* CPUID Leaf 0x1E constants: */
#define INTEL_AMX_TMUL_MAX_K           0x10
#define INTEL_AMX_TMUL_MAX_N           0x40

void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                              uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
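
/*
 * For example, the Intel vendor words 0x756e6547, 0x49656e69 and 0x6c65746e
 * ("Genu", "ineI", "ntel" in little-endian byte order) are reassembled by
 * the function above into the string "GenuineIntel".
 */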

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
   CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
   CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
/* missing:
   CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
   CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
   CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
   CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
   CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES (CPUID_SVM_NPT | CPUID_SVM_VGIF | \
          CPUID_SVM_SVME_ADDR_CHK)
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
/* missing:
   CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
   CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
   CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
   CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
#define TCG_14_0_ECX_FEATURES 0
#define TCG_SGX_12_0_EAX_FEATURES 0
#define TCG_SGX_12_0_EBX_FEATURES 0
#define TCG_SGX_12_1_EAX_FEATURES 0

FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", "kvm-msi-ext-dest-id",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", "avic", NULL, "v-vmsave-vmload",
            "vgif", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "svme-addr-chk", NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", "sgx", "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            "bus-lock-detect", "cldemote", NULL, "movdiri",
            "movdir64b", NULL, "sgxlc", "pks",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            "fsrm", NULL, NULL, NULL,
            "avx512-vp2intersect", NULL, "md-clear", NULL,
            NULL, NULL, "serialize", NULL,
            "tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr",
            NULL, NULL, "amx-bf16", "avx512-fp16",
            "amx-tile", "amx-int8", "spec-ctrl", "stibp",
            NULL, "arch-capabilities", "core-capability", "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_7_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            "avx-vnni", "avx512-bf16", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_7_1_EAX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "clzero", NULL, "xsaveerptr", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, "ibrs", "amd-stibp",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            "xfd", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_XSAVE_XSS_LO] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true,
            .ecx = 1,
            .reg = R_ECX,
        },
    },
    [FEAT_XSAVE_XSS_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true,
            .ecx = 1,
            .reg = R_EDX
        },
    },
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_XCR0_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_XCR0_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /* Below are MSR exposed features */
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
            "taa-no", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
        },
    },
    [FEAT_PERF_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "full-width-write", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_PERF_CAPABILITIES,
        },
    },

    [FEAT_VMX_PROCBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
            NULL, NULL, NULL, "vmx-hlt-exit",
            NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
            "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
            "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
            "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
            "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
            "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
        }
    },

    [FEAT_VMX_SECONDARY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
            "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
            "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
            "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
            "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
            "vmx-xsaves", NULL, NULL, NULL,
            NULL, "vmx-tsc-scaling", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_PROCBASED_CTLS2,
        }
    },

    [FEAT_VMX_PINBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
            NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        }
    },
|
|
|
|
|
|
|
[FEAT_VMX_EXIT_CTLS] = {
|
|
|
|
.type = MSR_FEATURE_WORD,
|
|
|
|
/*
|
|
|
|
* VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
|
|
|
|
* the LM CPUID bit.
|
|
|
|
*/
|
|
|
|
.feat_names = {
|
|
|
|
NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
|
|
|
|
"vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
|
|
|
|
NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
|
|
|
|
"vmx-exit-save-efer", "vmx-exit-load-efer",
|
|
|
|
"vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
|
|
|
|
NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
|
2021-02-05 09:33:25 +01:00
|
|
|
NULL, "vmx-exit-load-pkrs", NULL, NULL,
|
2019-07-01 18:32:17 +02:00
|
|
|
},
|
|
|
|
.msr = {
|
|
|
|
.index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
|
|
|
[FEAT_VMX_ENTRY_CTLS] = {
|
|
|
|
.type = MSR_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
NULL, NULL, "vmx-entry-noload-debugctl", NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, "vmx-entry-ia32e-mode", NULL, NULL,
|
|
|
|
NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
|
|
|
|
"vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
|
2021-02-05 09:33:25 +01:00
|
|
|
NULL, NULL, "vmx-entry-load-pkrs", NULL,
|
2019-07-01 18:32:17 +02:00
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
},
|
|
|
|
.msr = {
|
|
|
|
.index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
|
|
|
[FEAT_VMX_MISC] = {
|
|
|
|
.type = MSR_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
|
|
|
|
"vmx-activity-wait-sipi", NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
|
|
|
|
},
|
|
|
|
.msr = {
|
|
|
|
.index = MSR_IA32_VMX_MISC,
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
|
|
|
[FEAT_VMX_EPT_VPID_CAPS] = {
|
|
|
|
.type = MSR_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
"vmx-ept-execonly", NULL, NULL, NULL,
|
|
|
|
NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
"vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
|
|
|
|
"vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
|
|
|
|
NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
"vmx-invvpid", NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
"vmx-invvpid-single-addr", "vmx-invept-single-context",
|
|
|
|
"vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
},
|
|
|
|
.msr = {
|
|
|
|
.index = MSR_IA32_VMX_EPT_VPID_CAP,
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
|
|
|
[FEAT_VMX_BASIC] = {
|
|
|
|
.type = MSR_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
[54] = "vmx-ins-outs",
|
|
|
|
[55] = "vmx-true-ctls",
|
|
|
|
},
|
|
|
|
.msr = {
|
|
|
|
.index = MSR_IA32_VMX_BASIC,
|
|
|
|
},
|
|
|
|
/* Just to be safe - we don't support setting the MSEG version field. */
|
|
|
|
.no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
|
|
|
|
},
|
|
|
|
|
|
|
|
[FEAT_VMX_VMFUNC] = {
|
|
|
|
.type = MSR_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
[0] = "vmx-eptp-switching",
|
|
|
|
},
|
|
|
|
.msr = {
|
|
|
|
.index = MSR_IA32_VMX_VMFUNC,
|
|
|
|
}
|
|
|
|
},
|
|
|
|
|
2020-12-02 11:10:42 +01:00
|
|
|
[FEAT_14_0_ECX] = {
|
|
|
|
.type = CPUID_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, "intel-pt-lip",
|
|
|
|
},
|
|
|
|
.cpuid = {
|
|
|
|
.eax = 0x14,
|
|
|
|
.needs_ecx = true, .ecx = 0,
|
|
|
|
.reg = R_ECX,
|
|
|
|
},
|
|
|
|
.tcg_features = TCG_14_0_ECX_FEATURES,
|
|
|
|
},
|
|
|
|
|
2021-07-19 13:21:10 +02:00
|
|
|
[FEAT_SGX_12_0_EAX] = {
|
|
|
|
.type = CPUID_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
"sgx1", "sgx2", NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
},
|
|
|
|
.cpuid = {
|
|
|
|
.eax = 0x12,
|
|
|
|
.needs_ecx = true, .ecx = 0,
|
|
|
|
.reg = R_EAX,
|
|
|
|
},
|
|
|
|
.tcg_features = TCG_SGX_12_0_EAX_FEATURES,
|
|
|
|
},
|
2021-07-19 13:21:11 +02:00
|
|
|
|
|
|
|
[FEAT_SGX_12_0_EBX] = {
|
|
|
|
.type = CPUID_FEATURE_WORD,
|
|
|
|
.feat_names = {
|
|
|
|
"sgx-exinfo" , NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
},
|
|
|
|
.cpuid = {
|
|
|
|
.eax = 0x12,
|
|
|
|
.needs_ecx = true, .ecx = 0,
|
|
|
|
.reg = R_EBX,
|
|
|
|
},
|
|
|
|
.tcg_features = TCG_SGX_12_0_EBX_FEATURES,
|
|
|
|
},
|
    [FEAT_SGX_12_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "sgx-debug", "sgx-mode64", NULL,
            "sgx-provisionkey", "sgx-tokenkey", NULL, "sgx-kss",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0x12,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_SGX_12_1_EAX_FEATURES,
    },
};

typedef struct FeatureMask {
    FeatureWord index;
    uint64_t mask;
} FeatureMask;

typedef struct FeatureDep {
    FeatureMask from, to;
} FeatureDep;

static FeatureDep feature_dependencies[] = {
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
        .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
        .to = { FEAT_CORE_CAPABILITY, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
        .to = { FEAT_PERF_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_MISC, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_BASIC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
        .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
    },
    {
        .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
        .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT },
        .to = { FEAT_14_0_ECX, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
        .to = { FEAT_VMX_VMFUNC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM },
        .to = { FEAT_SVM, ~0ull },
    },
};

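/*
 * Illustrative sketch (editor's addition, not from the original source):
 * one way a table like feature_dependencies[] can be consumed.  If the
 * "from" requirement is absent, every "to" bit that depends on it is
 * cleared.  The function name is hypothetical; in QEMU the real handling
 * lives in x86_cpu_expand_features(), which also reports conflicts with
 * explicitly requested features.
 */
#if 0   /* example only, kept out of the build */
static void sketch_apply_feature_dependencies(CPUX86State *env)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
        FeatureDep *d = &feature_dependencies[i];

        if (!(env->features[d->from.index] & d->from.mask)) {
            /* Requirement not present: mask out all dependent bits */
            env->features[d->to.index] &= ~d->to.mask;
        }
    }
}
#endif
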
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/* CPUID feature bits available in XSS */
#define CPUID_XSTATE_XSS_MASK    (XSTATE_ARCH_LBR_MASK)

ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .size = sizeof(XSavePKRU) },
    [XSTATE_ARCH_LBR_BIT] = {
            .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_ARCH_LBR,
            .offset = 0 /* supervisor mode component, offset = 0 */,
            .size = sizeof(XSavesArchLBR) },
    [XSTATE_XTILE_CFG_BIT] = {
        .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE,
        .size = sizeof(XSaveXTILECFG),
    },
    [XSTATE_XTILE_DATA_BIT] = {
        .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE,
        .size = sizeof(XSaveXTILEDATA)
    },
};

static uint32_t xsave_area_size(uint64_t mask, bool compacted)
{
    uint64_t ret = x86_ext_save_areas[0].size;
    const ExtSaveArea *esa;
    uint32_t offset = 0;
    int i;

    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            offset = compacted ? ret : esa->offset;
            ret = MAX(ret, offset + esa->size);
        }
    }
    return ret;
}

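/*
 * Illustrative sketch (editor's addition, not from the original source):
 * a typical call to xsave_area_size().  The mask is built from the same
 * XSTATE_*_MASK bits that index x86_ext_save_areas[] above; passing false
 * asks for the standard (non-compacted) XSAVE layout.  The helper name is
 * hypothetical.
 */
#if 0   /* example only, kept out of the build */
static uint32_t sketch_fp_sse_avx_xsave_size(void)
{
    uint64_t mask = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK;

    return xsave_area_size(mask, false);
}
#endif
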
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_xcr0_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_XCR0_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_XCR0_LO];
}

/* Return name of 32-bit register, from a R_* constant */
static const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_XSS_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_XSS_LO];
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i++) {
        uint64_t f = 1ULL << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

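/*
 * Illustrative sketch (editor's addition, not from the original source):
 * reading the host vendor string with host_cpuid().  For CPUID leaf 0 the
 * twelve vendor bytes are returned in EBX, EDX, ECX, in that order.  The
 * helper name is hypothetical.
 */
#if 0   /* example only, kept out of the build */
static void sketch_read_host_vendor(char vendor[CPUID_VENDOR_SZ + 1])
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[CPUID_VENDOR_SZ] = '\0';
}
#endif
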
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = x86_cpu_type_name(cpu_model);
    return object_class_by_name(typename);
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

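/*
 * Illustrative sketch (editor's addition, not from the original source):
 * the round trip provided by the helpers above.  A model name such as
 * "qemu64" maps to the QOM type name built from it plus X86_CPU_TYPE_SUFFIX,
 * and x86_cpu_class_get_model_name() strips the suffix again.  The helper
 * name is hypothetical.
 */
#if 0   /* example only, kept out of the build */
static void sketch_model_name_round_trip(void)
{
    ObjectClass *oc = x86_cpu_class_by_name("qemu64");

    if (oc) {
        g_autofree char *model =
            x86_cpu_class_get_model_name(X86_CPU_CLASS(oc));
        assert(g_str_equal(model, "qemu64"));
    }
}
#endif
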
typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    const char *note;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    const CPUCaches *const cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
    const char *deprecation_note;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    const X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    const char *note;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version */
static char *x86_cpu_versioned_model_name(const X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

static const X86CPUVersionDefinition *
x86_cpu_def_get_versions(const X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    return def->versions ?: default_version_list;
}

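/*
 * Illustrative sketch (editor's addition, not from the original source):
 * walking every version of a CPU model definition with the helpers above.
 * The list returned by x86_cpu_def_get_versions() is terminated by an entry
 * whose version field is 0.  The helper name is hypothetical.
 */
#if 0   /* example only, kept out of the build */
static void sketch_list_versioned_names(const X86CPUDefinition *def)
{
    const X86CPUVersionDefinition *vdef;

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        g_autofree char *name =
            x86_cpu_versioned_model_name(def, vdef->version);
        qemu_printf("%s\n", name);
    }
}
#endif
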
static const CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

static const CPUCaches epyc_rome_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 16 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 16384,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

static const CPUCaches epyc_milan_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 32 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 32768,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

/* The following VMX features are not supported by KVM and are left out in the
 * CPU definitions:
 *
 *  Dual-monitor support (all processors)
 *  Entry to SMM
 *  Deactivate dual-monitor treatment
 *  Number of CR3-target values
 *  Shutdown activity state
 *  Wait-for-SIPI activity state
 *  PAUSE-loop exiting (Westmere and newer)
 *  EPT-violation #VE (Broadwell and newer)
 *  Inject event with insn length=0 (Skylake and newer)
 *  Conceal non-root operation from PT
 *  Conceal VM exits from PT
 *  Conceal VM entries from PT
 *  Enable ENCLS exiting
 *  Mode-based execute control (XS/XU)
 *  TSC scaling (Skylake Server and newer)
 *  GPA translation for PT (IceLake and newer)
 *  User wait and pause
 *  ENCLV exiting
 *  Load IA32_RTIT_CTL
 *  Clear IA32_RTIT_CTL
 *  Advanced VM-exit information for EPT violations
 *  Sub-page write permissions
 *  PT in VMX operation
 */

static const X86CPUDefinition builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_AMD,
        .family = 15,
        .model = 107,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor = CPUID_VENDOR_AMD,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
        .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
             VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
             VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
             VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
             VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
             VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
             VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
             VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
             VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
             VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
             VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
             VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
            0,
target/i386: add VMX features to named CPU models
This allows using "-cpu Haswell,+vmx", which we did not really want to
support in QEMU but was produced by Libvirt when using the "host-model"
CPU model. Without this patch, no VMX feature is _actually_ supported
(only the basic instruction set extensions are) and KVM fails to load
in the guest.
This was produced from the output of scripts/kvm/vmxcap using the following
very ugly Python script:
bits = {
'INS/OUTS instruction information': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_INS_OUTS'],
'IA32_VMX_TRUE_*_CTLS support': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_TRUE_CTLS'],
'External interrupt exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_EXT_INTR_MASK'],
'NMI exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_NMI_EXITING'],
'Virtual NMIs': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VIRTUAL_NMIS'],
'Activate VMX-preemption timer': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VMX_PREEMPTION_TIMER'],
'Process posted interrupts': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_POSTED_INTR'],
'Interrupt window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_INTR_PENDING'],
'Use TSC offsetting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_TSC_OFFSETING'],
'HLT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_HLT_EXITING'],
'INVLPG exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_INVLPG_EXITING'],
'MWAIT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MWAIT_EXITING'],
'RDPMC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDPMC_EXITING'],
'RDTSC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDTSC_EXITING'],
'CR3-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_LOAD_EXITING'],
'CR3-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_STORE_EXITING'],
'CR8-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_LOAD_EXITING'],
'CR8-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_STORE_EXITING'],
'Use TPR shadow': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_TPR_SHADOW'],
'NMI-window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_NMI_PENDING'],
'MOV-DR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MOV_DR_EXITING'],
'Unconditional I/O exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_UNCOND_IO_EXITING'],
'Use I/O bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_IO_BITMAPS'],
'Monitor trap flag': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_TRAP_FLAG'],
'Use MSR bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_MSR_BITMAPS'],
'MONITOR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_EXITING'],
'PAUSE exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_PAUSE_EXITING'],
'Activate secondary control': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS'],
'Virtualize APIC accesses': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES'],
'Enable EPT': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_EPT'],
'Descriptor-table exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_DESC'],
'Enable RDTSCP': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDTSCP'],
'Virtualize x2APIC mode': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE'],
'Enable VPID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VPID'],
'WBINVD exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_WBINVD_EXITING'],
'Unrestricted guest': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST'],
'APIC register emulation': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT'],
'Virtual interrupt delivery': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY'],
'PAUSE-loop exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING'],
'RDRAND exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDRAND_EXITING'],
'Enable INVPCID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_INVPCID'],
'Enable VM functions': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VMFUNC'],
'VMCS shadowing': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_SHADOW_VMCS'],
'RDSEED exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDSEED_EXITING'],
'Enable PML': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_PML'],
'Enable XSAVES/XRSTORS': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_XSAVES'],
'Save debug controls': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_DEBUG_CONTROLS'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Acknowledge interrupt on exit': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_ACK_INTR_ON_EXIT'],
'Save IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_PAT'],
'Load IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PAT'],
'Save IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_EFER'],
'Load IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_EFER'],
'Save VMX-preemption timer value': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER'],
'Clear IA32_BNDCFGS': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_CLEAR_BNDCFGS'],
'Load debug controls': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS'],
'IA-32e mode guest': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_IA32E_MODE'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Load IA32_PAT': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PAT'],
'Load IA32_EFER': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_EFER'],
'Load IA32_BNDCFGS': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_BNDCFGS'],
'Store EFER.LMA into IA-32e mode guest control': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_STORE_LMA'],
'HLT activity state': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ACTIVITY_HLT'],
'VMWRITE to VM-exit information fields': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_VMWRITE_VMEXIT'],
'Inject event with insn length=0': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ZERO_LEN_INJECT'],
'Execute-only EPT translations': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_EXECONLY'],
'Page-walk length 4': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_PAGE_WALK_LENGTH_4'],
'Paging-structure memory type WB': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_WB'],
'2MB EPT pages': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB'],
'INVEPT supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT'],
'EPT accessed and dirty flags': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_AD_BITS'],
'Single-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT'],
'All-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_ALL_CONTEXT'],
'INVVPID supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID'],
'Individual-address INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_ADDR'],
'Single-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT'],
'All-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_ALL_CONTEXT'],
'Single-context-retaining-globals INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS'],
'EPTP Switching': ['FEAT_VMX_VMFUNC', 'MSR_VMX_VMFUNC_EPT_SWITCHING']
}
import sys
import textwrap
out = {}
for l in sys.stdin.readlines():
    l = l.rstrip()
    if l.endswith('!!'):
        l = l[:-2].rstrip()
    if l.startswith(' ') and (l.endswith('default') or l.endswith('yes')):
        l = l[4:]
        for key, value in bits.items():
            if l.startswith(key):
                ctl, bit = value
                if ctl in out:
                    out[ctl] = out[ctl] + ' | '
                else:
                    out[ctl] = ' [%s] = ' % ctl
                out[ctl] = out[ctl] + bit
for x in sorted(out.keys()):
    print("\n ".join(textwrap.wrap(out[x] + ",")))
Note that the script has a bug in that some keys apply to both VM entry
and VM exit controls ("load IA32_PERF_GLOBAL_CTRL", "load IA32_EFER",
"load IA32_PAT"). Those have to be fixed by hand; one way to avoid the
manual fix-up is sketched right after this commit message.
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-11-20 18:37:53 +01:00
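The manual fix-up mentioned in the note above can be avoided if each
vmxcap label is allowed to map to a list of (control word, bit) pairs
rather than a single pair. What follows is only an illustrative sketch,
not part of the original commit or of QEMU: the trimmed-down bits table
and the sample input line are made up for the example, and real input
would come from scripts/kvm/vmxcap on stdin as in the original script.

# Sketch only: one vmxcap label can feed several control words, so the
# "Load IA32_*" entries land in both FEAT_VMX_ENTRY_CTLS and
# FEAT_VMX_EXIT_CTLS without fixing the generated output by hand.
bits = {
    'Load IA32_PAT': [('FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PAT'),
                      ('FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PAT')],
    'Load IA32_EFER': [('FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_EFER'),
                       ('FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_EFER')],
}
out = {}
# Stand-in for one line of vmxcap output; the original script reads sys.stdin.
for line in ['  Load IA32_EFER                            yes']:
    line = line.strip()
    for key, pairs in bits.items():
        if line.startswith(key):
            for ctl, bit in pairs:
                out.setdefault(ctl, []).append(bit)
for ctl in sorted(out):
    print('    [%s] = %s,' % (ctl, ' | '.join(out[ctl])))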
|
|
|
/* VMX features from Cedar Mill/Prescott */
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Common KVM processor"
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "qemu32",
|
|
|
|
.level = 4,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 6,
|
2013-09-10 22:48:59 +02:00
|
|
|
.model = 6,
|
2010-03-11 14:38:55 +01:00
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PPRO_FEATURES,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2015-11-03 20:24:18 +01:00
|
|
|
CPUID_EXT_SSE3,
|
2010-03-11 14:39:06 +01:00
|
|
|
.xlevel = 0x80000004,
|
2016-04-09 21:44:20 +02:00
|
|
|
.model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
|
2010-03-11 14:38:55 +01:00
|
|
|
},
|
2010-05-21 09:50:51 +02:00
|
|
|
{
|
|
|
|
.name = "kvm32",
|
|
|
|
.level = 5,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-05-21 09:50:51 +02:00
|
|
|
.family = 15,
|
|
|
|
.model = 6,
|
|
|
|
.stepping = 1,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
PPRO_FEATURES | CPUID_VME |
|
2010-05-21 09:50:51 +02:00
|
|
|
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
0,
|
2019-11-20 18:37:53 +01:00
|
|
|
/* VMX features from Yonah */
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
|
|
|
|
VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
|
|
|
|
VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
|
2010-05-21 09:50:51 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Common 32-bit KVM processor"
|
|
|
|
},
|
2010-03-11 14:38:55 +01:00
|
|
|
{
|
|
|
|
.name = "coreduo",
|
|
|
|
.level = 10,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 6,
|
|
|
|
.model = 14,
|
|
|
|
.stepping = 8,
|
2014-10-03 21:39:49 +02:00
|
|
|
/* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PPRO_FEATURES | CPUID_VME |
|
2014-10-03 21:39:49 +02:00
|
|
|
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
|
|
|
|
CPUID_SS,
|
|
|
|
/* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
|
2014-10-03 21:39:50 +02:00
|
|
|
* CPUID_EXT_PDCM, CPUID_EXT_VMX */
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2014-10-03 21:39:50 +02:00
|
|
|
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_NX,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
|
|
|
|
VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
|
|
|
|
VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "486",
|
2010-03-11 14:39:06 +01:00
|
|
|
.level = 1,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 4,
|
2013-05-01 17:30:51 +02:00
|
|
|
.model = 8,
|
2010-03-11 14:38:55 +01:00
|
|
|
.stepping = 0,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
I486_FEATURES,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0,
|
2018-01-09 16:45:13 +01:00
|
|
|
.model_id = "",
|
2010-03-11 14:38:55 +01:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "pentium",
|
|
|
|
.level = 1,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 5,
|
|
|
|
.model = 4,
|
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PENTIUM_FEATURES,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0,
|
2018-01-09 16:45:13 +01:00
|
|
|
.model_id = "",
|
2010-03-11 14:38:55 +01:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "pentium2",
|
|
|
|
.level = 2,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 6,
|
|
|
|
.model = 5,
|
|
|
|
.stepping = 2,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PENTIUM2_FEATURES,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0,
|
2018-01-09 16:45:13 +01:00
|
|
|
.model_id = "",
|
2010-03-11 14:38:55 +01:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "pentium3",
|
2015-07-09 21:07:39 +02:00
|
|
|
.level = 3,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 6,
|
|
|
|
.model = 7,
|
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PENTIUM3_FEATURES,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0,
|
2018-01-09 16:45:13 +01:00
|
|
|
.model_id = "",
|
2010-03-11 14:38:55 +01:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "athlon",
|
|
|
|
.level = 2,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 6,
|
|
|
|
.model = 2,
|
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
|
2012-09-06 12:05:37 +02:00
|
|
|
CPUID_MCA,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2012-09-06 12:05:37 +02:00
|
|
|
CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
|
2010-03-11 14:38:55 +01:00
|
|
|
.xlevel = 0x80000008,
|
2016-04-09 21:44:20 +02:00
|
|
|
.model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
|
2010-03-11 14:38:55 +01:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "n270",
|
2015-07-09 21:07:39 +02:00
|
|
|
.level = 10,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2010-03-11 14:38:55 +01:00
|
|
|
.family = 6,
|
|
|
|
.model = 28,
|
|
|
|
.stepping = 2,
|
2014-10-03 21:39:49 +02:00
|
|
|
/* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
PPRO_FEATURES |
|
2014-10-03 21:39:49 +02:00
|
|
|
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
|
|
|
|
CPUID_ACPI | CPUID_SS,
|
2010-03-11 14:38:55 +01:00
|
|
|
/* Some CPUs got no CPUID_SEP */
|
2014-10-03 21:39:49 +02:00
|
|
|
/* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
|
|
|
|
* CPUID_EXT_XTPR */
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
|
2013-04-25 20:43:04 +02:00
|
|
|
CPUID_EXT_MOVBE,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2012-09-06 12:05:37 +02:00
|
|
|
CPUID_EXT2_NX,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_LAHF_LM,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2010-03-11 14:38:55 +01:00
|
|
|
.model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
|
|
|
|
},
|
2012-09-05 22:41:10 +02:00
|
|
|
{
|
|
|
|
.name = "Conroe",
|
2015-07-09 21:07:39 +02:00
|
|
|
.level = 10,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 6,
|
2013-05-27 22:23:54 +02:00
|
|
|
.model = 15,
|
2012-09-05 22:41:10 +02:00
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_LAHF_LM,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2012-09-05 22:41:10 +02:00
|
|
|
.model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "Penryn",
|
2015-07-09 21:07:39 +02:00
|
|
|
.level = 10,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 6,
|
2013-05-27 22:23:54 +02:00
|
|
|
.model = 23,
|
2012-09-05 22:41:10 +02:00
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_LAHF_LM,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2012-09-05 22:41:10 +02:00
|
|
|
.model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "Nehalem",
|
2015-07-09 21:07:39 +02:00
|
|
|
.level = 11,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 6,
|
2013-05-27 22:23:54 +02:00
|
|
|
.model = 26,
|
2012-09-05 22:41:10 +02:00
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_LAHF_LM,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2012-09-05 22:41:10 +02:00
|
|
|
.model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Nehalem-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2012-09-05 22:41:10 +02:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "Westmere",
|
|
|
|
.level = 11,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 6,
|
|
|
|
.model = 44,
|
|
|
|
.stepping = 1,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_LAHF_LM,
|
2015-06-07 11:15:08 +02:00
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2012-09-05 22:41:10 +02:00
|
|
|
.model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Westmere-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Westmere E56xx/L56xx/X56xx (IBRS update)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2012-09-05 22:41:10 +02:00
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "SandyBridge",
|
|
|
|
.level = 0xd,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 6,
|
|
|
|
.model = 42,
|
|
|
|
.stepping = 1,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
|
|
|
|
CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT2_SYSCALL,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_LAHF_LM,
|
2014-11-24 15:54:43 +01:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT,
|
2015-06-07 11:15:08 +02:00
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2012-09-05 22:41:10 +02:00
|
|
|
.model_id = "Intel Xeon E312xx (Sandy Bridge)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "SandyBridge-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2012-09-05 22:41:10 +02:00
|
|
|
},
|
2014-12-05 10:55:23 +01:00
|
|
|
{
|
|
|
|
.name = "IvyBridge",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 58,
|
|
|
|
.stepping = 9,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
|
|
|
|
CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_ERMS,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_LAHF_LM,
|
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT,
|
2015-06-07 11:15:08 +02:00
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2014-12-05 10:55:23 +01:00
|
|
|
.model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "IvyBridge-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2014-12-05 10:55:23 +01:00
|
|
|
},
|
2018-01-09 16:45:17 +01:00
|
|
|
{
|
target-i386: Add Haswell CPU model
Features added to the model, in relation to SandyBridge:
fma        CPUID[1].ECX[12]
pcid       CPUID[1].ECX[17]
movbe      CPUID[1].ECX[22]
fsgsbase   CPUID[EAX=7,ECX=0].EBX[0]
bmi1       CPUID[EAX=7,ECX=0].EBX[3]
hle        CPUID[EAX=7,ECX=0].EBX[4]
avx2       CPUID[EAX=7,ECX=0].EBX[5]
smep       CPUID[EAX=7,ECX=0].EBX[7]
bmi2       CPUID[EAX=7,ECX=0].EBX[8]
erms       CPUID[EAX=7,ECX=0].EBX[9]
invpcid    CPUID[EAX=7,ECX=0].EBX[10]
rtm        CPUID[EAX=7,ECX=0].EBX[11]
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Andreas Färber <afaerber@suse.de>
2012-11-14 19:28:54 +01:00
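As a quick cross-check of the bit positions listed above, the leaf-7 EBX bits can be
decoded from a raw CPUID value with a few lines of Python. This is purely illustrative
and not part of QEMU; the EBX value would come from any external CPUID dump.

# Illustrative only: decode the CPUID[EAX=7,ECX=0].EBX bits named in the
# commit message above from a raw EBX register value.
LEAF7_EBX_BITS = {
    'fsgsbase': 0, 'bmi1': 3, 'hle': 4, 'avx2': 5, 'smep': 7,
    'bmi2': 8, 'erms': 9, 'invpcid': 10, 'rtm': 11,
}

def leaf7_features(ebx):
    # Return the subset of the named features whose bit is set in ebx.
    return sorted(name for name, bit in LEAF7_EBX_BITS.items() if ebx & (1 << bit))

# Example: 0xfb9 has exactly the nine bits listed above set.
print(leaf7_features(0xfb9))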
|
|
|
.name = "Haswell",
|
|
|
|
.level = 0xd,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
2012-11-14 19:28:54 +01:00
|
|
|
.family = 6,
|
|
|
|
.model = 60,
|
2017-03-09 19:12:12 +01:00
|
|
|
.stepping = 4,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
2014-12-05 10:52:46 +01:00
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT2_SYSCALL,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2015-09-28 14:00:18 +02:00
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_7_0_EBX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
2015-03-13 19:39:43 +01:00
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
|
|
|
CPUID_7_0_EBX_RTM,
|
2014-11-24 15:54:43 +01:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT,
|
2015-06-07 11:15:08 +02:00
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
|
|
|
|
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2012-11-14 19:28:54 +01:00
|
|
|
.model_id = "Intel Core Processor (Haswell)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Haswell-noTSX",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
|
|
|
{ "stepping", "1" },
|
|
|
|
{ "model-id", "Intel Core Processor (Haswell, no TSX)", },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.version = 3,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Haswell-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
/* Restore TSX features removed by -v2 above */
|
|
|
|
{ "hle", "on" },
|
|
|
|
{ "rtm", "on" },
|
|
|
|
/*
|
|
|
|
* Haswell and Haswell-IBRS had stepping=4 in
|
|
|
|
* QEMU 4.0 and older
|
|
|
|
*/
|
|
|
|
{ "stepping", "4" },
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Core Processor (Haswell, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.version = 4,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Haswell-noTSX-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
|
|
|
/* spec-ctrl was already enabled by -v3 above */
|
|
|
|
{ "stepping", "1" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Core Processor (Haswell, no TSX, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2012-11-14 19:28:54 +01:00
|
|
|
},
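Before the next model, a hedged aside on the .props lists used by the version entries above: each one is a plain name/value pair list terminated by an empty element. The standalone sketch below is not QEMU code (its struct and identifiers are invented for the example, and it only prints the pairs instead of applying them to a CPU object), but it shows the walking pattern such a list implies:

/*
 * Standalone illustration, not QEMU code: walking a name/value override
 * list like the "Haswell-noTSX" props above.
 */
#include <stdio.h>

struct prop_value_example {        /* hypothetical stand-in for PropValue */
    const char *prop;
    const char *value;
};

static const struct prop_value_example haswell_notsx_example[] = {
    { "hle", "off" },
    { "rtm", "off" },
    { "stepping", "1" },
    { "model-id", "Intel Core Processor (Haswell, no TSX)" },
    { NULL, NULL },                /* end of list */
};

int main(void)
{
    for (const struct prop_value_example *pv = haswell_notsx_example;
         pv->prop; pv++) {
        printf("set %s = %s\n", pv->prop, pv->value);
    }
    return 0;
}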
|
2014-06-17 22:11:40 +02:00
|
|
|
{
|
|
|
|
.name = "Broadwell",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 61,
|
|
|
|
.stepping = 2,
|
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-17 22:11:40 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
2014-12-05 10:52:46 +01:00
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
2014-06-17 22:11:40 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2015-09-28 14:00:18 +02:00
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
2014-06-17 22:11:40 +02:00
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
2015-03-13 19:39:43 +01:00
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
2014-06-17 22:11:40 +02:00
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
2015-03-13 19:39:43 +01:00
|
|
|
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
|
2014-06-17 22:11:40 +02:00
|
|
|
CPUID_7_0_EBX_SMAP,
|
2014-11-24 15:54:43 +01:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT,
|
2015-06-07 11:15:08 +02:00
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
target/i386: add VMX features to named CPU models
This allows using "-cpu Haswell,+vmx", which we did not really want to
support in QEMU but was produced by Libvirt when using the "host-model"
CPU model. Without this patch, no VMX feature is _actually_ supported
(only the basic instruction set extensions are) and KVM fails to load
in the guest.
This was produced from the output of scripts/kvm/vmxcap using the following
very ugly Python script:
bits = {
'INS/OUTS instruction information': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_INS_OUTS'],
'IA32_VMX_TRUE_*_CTLS support': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_TRUE_CTLS'],
'External interrupt exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_EXT_INTR_MASK'],
'NMI exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_NMI_EXITING'],
'Virtual NMIs': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VIRTUAL_NMIS'],
'Activate VMX-preemption timer': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VMX_PREEMPTION_TIMER'],
'Process posted interrupts': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_POSTED_INTR'],
'Interrupt window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_INTR_PENDING'],
'Use TSC offsetting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_TSC_OFFSETING'],
'HLT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_HLT_EXITING'],
'INVLPG exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_INVLPG_EXITING'],
'MWAIT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MWAIT_EXITING'],
'RDPMC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDPMC_EXITING'],
'RDTSC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDTSC_EXITING'],
'CR3-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_LOAD_EXITING'],
'CR3-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_STORE_EXITING'],
'CR8-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_LOAD_EXITING'],
'CR8-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_STORE_EXITING'],
'Use TPR shadow': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_TPR_SHADOW'],
'NMI-window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_NMI_PENDING'],
'MOV-DR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MOV_DR_EXITING'],
'Unconditional I/O exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_UNCOND_IO_EXITING'],
'Use I/O bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_IO_BITMAPS'],
'Monitor trap flag': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_TRAP_FLAG'],
'Use MSR bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_MSR_BITMAPS'],
'MONITOR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_EXITING'],
'PAUSE exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_PAUSE_EXITING'],
'Activate secondary control': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS'],
'Virtualize APIC accesses': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES'],
'Enable EPT': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_EPT'],
'Descriptor-table exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_DESC'],
'Enable RDTSCP': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDTSCP'],
'Virtualize x2APIC mode': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE'],
'Enable VPID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VPID'],
'WBINVD exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_WBINVD_EXITING'],
'Unrestricted guest': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST'],
'APIC register emulation': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT'],
'Virtual interrupt delivery': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY'],
'PAUSE-loop exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING'],
'RDRAND exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDRAND_EXITING'],
'Enable INVPCID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_INVPCID'],
'Enable VM functions': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VMFUNC'],
'VMCS shadowing': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_SHADOW_VMCS'],
'RDSEED exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDSEED_EXITING'],
'Enable PML': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_PML'],
'Enable XSAVES/XRSTORS': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_XSAVES'],
'Save debug controls': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_DEBUG_CONTROLS'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Acknowledge interrupt on exit': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_ACK_INTR_ON_EXIT'],
'Save IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_PAT'],
'Load IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PAT'],
'Save IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_EFER'],
'Load IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_EFER'],
'Save VMX-preemption timer value': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER'],
'Clear IA32_BNDCFGS': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_CLEAR_BNDCFGS'],
'Load debug controls': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS'],
'IA-32e mode guest': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_IA32E_MODE'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Load IA32_PAT': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PAT'],
'Load IA32_EFER': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_EFER'],
'Load IA32_BNDCFGS': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_BNDCFGS'],
'Store EFER.LMA into IA-32e mode guest control': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_STORE_LMA'],
'HLT activity state': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ACTIVITY_HLT'],
'VMWRITE to VM-exit information fields': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_VMWRITE_VMEXIT'],
'Inject event with insn length=0': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ZERO_LEN_INJECT'],
'Execute-only EPT translations': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_EXECONLY'],
'Page-walk length 4': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_PAGE_WALK_LENGTH_4'],
'Paging-structure memory type WB': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_WB'],
'2MB EPT pages': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB'],
'INVEPT supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT'],
'EPT accessed and dirty flags': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_AD_BITS'],
'Single-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT'],
'All-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_ALL_CONTEXT'],
'INVVPID supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID'],
'Individual-address INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_ADDR'],
'Single-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT'],
'All-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_ALL_CONTEXT'],
'Single-context-retaining-globals INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS'],
'EPTP Switching': ['FEAT_VMX_VMFUNC', 'MSR_VMX_VMFUNC_EPT_SWITCHING']
}
import sys
import textwrap
out = {}
for l in sys.stdin.readlines():
    l = l.rstrip()
    if l.endswith('!!'):
        l = l[:-2].rstrip()
    if l.startswith(' ') and (l.endswith('default') or l.endswith('yes')):
        l = l[4:]
        for key, value in bits.items():
            if l.startswith(key):
                ctl, bit = value
                if ctl in out:
                    out[ctl] = out[ctl] + ' | '
                else:
                    out[ctl] = ' [%s] = ' % ctl
                out[ctl] = out[ctl] + bit
for x in sorted(out.keys()):
    print("\n ".join(textwrap.wrap(out[x] + ",")))
Note that the script has a bug in that some keys apply to both VM entry
and VM exit controls ("load IA32_PERF_GLOBAL_CTRL", "load IA32_EFER",
"load IA32_PAT". Those have to be fixed by hand.
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-11-20 18:37:53 +01:00
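Since the commit message above leans on vmxcap output, a hedged side note: the controls it lists ultimately come from the VMX capability MSRs, where (for the processor-based controls) the high 32 bits report the allowed-1 settings. The sketch below is standalone, not QEMU code, and uses a made-up MSR value; only the monitor-trap-flag bit position (bit 27 per the Intel SDM) is intended to be real.

/* Standalone sketch, not QEMU code: decode one control from a fake MSR value. */
#include <stdint.h>
#include <stdio.h>

static int vmx_ctrl_allowed1(uint64_t ctls_msr, uint32_t bit)
{
    /* high half of an IA32_VMX_*_CTLS MSR = allowed-1 settings */
    return ((ctls_msr >> 32) & bit) != 0;
}

int main(void)
{
    uint64_t fake_procbased_ctls = 0xfffbfffe0401e172ULL; /* placeholder value */
    uint32_t monitor_trap_flag = 1u << 27;

    printf("monitor trap flag supported: %d\n",
           vmx_ctrl_allowed1(fake_procbased_ctls, monitor_trap_flag));
    return 0;
}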
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
|
|
|
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
2015-07-09 21:07:39 +02:00
|
|
|
.xlevel = 0x80000008,
|
2014-06-17 22:11:40 +02:00
|
|
|
.model_id = "Intel Core Processor (Broadwell)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Broadwell-noTSX",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
|
|
|
{ "model-id", "Intel Core Processor (Broadwell, no TSX)", },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.version = 3,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Broadwell-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
/* Restore TSX features removed by -v2 above */
|
|
|
|
{ "hle", "on" },
|
|
|
|
{ "rtm", "on" },
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Core Processor (Broadwell, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.version = 4,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Broadwell-noTSX-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
|
|
|
/* spec-ctrl was already enabled by -v3 above */
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Core Processor (Broadwell, no TSX, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2014-06-17 22:11:40 +02:00
|
|
|
},
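A short usage note, hedged because the exact spellings are not verified here: based on the alias fields above and the '-cpu help' listing quoted later in this file's annotations, each version should be reachable either by its versioned name or by its alias.

/*
 * Illustrative invocations (assumptions: a qemu-system-x86_64 binary and a
 * host that accepts these models; spellings follow the alias fields above):
 *
 *   qemu-system-x86_64 -cpu Broadwell        (unversioned name)
 *   qemu-system-x86_64 -cpu Broadwell-v3     (explicit version 3)
 *   qemu-system-x86_64 -cpu Broadwell-IBRS   (alias of version 3)
 */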
|
2016-04-27 10:13:06 +02:00
|
|
|
{
|
|
|
|
.name = "Skylake-Client",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 94,
|
|
|
|
.stepping = 3,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
|
|
|
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
|
2018-12-20 13:11:00 +01:00
|
|
|
CPUID_7_0_EBX_SMAP,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 4 */
|
2016-04-27 10:13:06 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
/* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
|
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
|
|
|
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
2016-04-27 10:13:06 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Core Processor (Skylake)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Skylake-Client-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Core Processor (Skylake, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2019-11-20 17:49:11 +01:00
|
|
|
{
|
|
|
|
.version = 3,
|
2019-11-20 17:49:12 +01:00
|
|
|
.alias = "Skylake-Client-noTSX-IBRS",
|
2019-11-20 17:49:11 +01:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
target/i386: Add the 'model-id' for Skylake -v3 CPU models
This fixes a source of confusion in the help output. (Although, if you squint
long enough at the '-cpu help' output, you _do_ notice that
"Skylake-Client-noTSX-IBRS" is an alias of "Skylake-Client-v3";
similarly for Skylake-Server-v3.)
Without this patch:
$ qemu-system-x86 -cpu help
...
x86 Skylake-Client-v1 Intel Core Processor (Skylake)
x86 Skylake-Client-v2 Intel Core Processor (Skylake, IBRS)
x86 Skylake-Client-v3 Intel Core Processor (Skylake, IBRS)
...
x86 Skylake-Server-v1 Intel Xeon Processor (Skylake)
x86 Skylake-Server-v2 Intel Xeon Processor (Skylake, IBRS)
x86 Skylake-Server-v3 Intel Xeon Processor (Skylake, IBRS)
...
With this patch:
$ ./qemu-system-x86 -cpu help
...
x86 Skylake-Client-v1 Intel Core Processor (Skylake)
x86 Skylake-Client-v2 Intel Core Processor (Skylake, IBRS)
x86 Skylake-Client-v3 Intel Core Processor (Skylake, IBRS, no TSX)
...
x86 Skylake-Server-v1 Intel Xeon Processor (Skylake)
x86 Skylake-Server-v2 Intel Xeon Processor (Skylake, IBRS)
x86 Skylake-Server-v3 Intel Xeon Processor (Skylake, IBRS, no TSX)
...
Signed-off-by: Kashyap Chamarthy <kchamart@redhat.com>
Message-Id: <20200123090116.14409-1-kchamart@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-01-23 10:01:15 +01:00
|
|
|
{ "model-id",
|
|
|
|
"Intel Core Processor (Skylake, IBRS, no TSX)" },
|
2019-11-20 17:49:11 +01:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2021-04-12 09:39:52 +02:00
|
|
|
{
|
|
|
|
.version = 4,
|
|
|
|
.note = "IBRS, XSAVES, no TSX",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2019-06-28 02:28:40 +02:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2016-04-27 10:13:06 +02:00
|
|
|
},
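One more hedged sketch, standalone and not QEMU code: the FEAT_XSAVE words above model CPUID.(EAX=0DH,ECX=1).EAX, and the "XSAVES is added in version 4" comment corresponds to one extra bit in that word. The macro names below are local to the example; the bit positions follow the Intel SDM.

/* Standalone sketch, not QEMU code: decode the XSAVE sub-feature word. */
#include <stdint.h>
#include <stdio.h>

#define EX_XSAVEOPT (1u << 0)
#define EX_XSAVEC   (1u << 1)
#define EX_XGETBV1  (1u << 2)
#define EX_XSAVES   (1u << 3)

static void dump_xsave_word(uint32_t w)
{
    printf("xsaveopt=%d xsavec=%d xgetbv1=%d xsaves=%d\n",
           !!(w & EX_XSAVEOPT), !!(w & EX_XSAVEC),
           !!(w & EX_XGETBV1), !!(w & EX_XSAVES));
}

int main(void)
{
    /* Skylake-Client v1..v3 as defined above: no XSAVES */
    dump_xsave_word(EX_XSAVEOPT | EX_XSAVEC | EX_XGETBV1);
    /* version 4 turns "xsaves" on as well */
    dump_xsave_word(EX_XSAVEOPT | EX_XSAVEC | EX_XGETBV1 | EX_XSAVES);
    return 0;
}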
|
2017-06-21 07:29:34 +02:00
|
|
|
{
|
|
|
|
.name = "Skylake-Server",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 85,
|
|
|
|
.stepping = 4,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
|
|
|
|
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
|
|
|
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
|
2018-12-20 13:11:00 +01:00
|
|
|
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
|
2017-06-21 07:29:34 +02:00
|
|
|
CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
|
|
|
|
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
|
2017-12-19 04:37:30 +01:00
|
|
|
CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
|
2018-10-29 09:39:53 +01:00
|
|
|
.features[FEAT_7_0_ECX] =
|
|
|
|
CPUID_7_0_ECX_PKU,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 5 */
|
2017-06-21 07:29:34 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
/* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
|
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
2019-11-25 19:12:16 +01:00
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
2017-06-21 07:29:34 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Xeon Processor (Skylake)",
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "Skylake-Server-IBRS",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
/* clflushopt was not added to Skylake-Server-IBRS */
|
|
|
|
/* TODO: add -v3 including clflushopt */
|
|
|
|
{ "clflushopt", "off" },
|
|
|
|
{ "spec-ctrl", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"Intel Xeon Processor (Skylake, IBRS)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2019-11-20 17:49:11 +01:00
|
|
|
{
|
|
|
|
.version = 3,
|
2019-11-20 17:49:12 +01:00
|
|
|
.alias = "Skylake-Server-noTSX-IBRS",
|
2019-11-20 17:49:11 +01:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
2020-01-23 10:01:15 +01:00
|
|
|
{ "model-id",
|
|
|
|
"Intel Xeon Processor (Skylake, IBRS, no TSX)" },
|
2019-11-20 17:49:11 +01:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2020-07-14 10:41:48 +02:00
|
|
|
{
|
|
|
|
.version = 4,
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "vmx-eptp-switching", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2021-04-12 09:39:52 +02:00
|
|
|
{
|
|
|
|
.version = 5,
|
|
|
|
.note = "IBRS, XSAVES, EPT switching, no TSX",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2019-06-28 02:28:40 +02:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2017-06-21 07:29:34 +02:00
|
|
|
},
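Finally, a hedged sketch of how the version lists above appear to compose: comments such as "Restore TSX features removed by -v2 above" and "spec-ctrl was already enabled by -v3 above" suggest that selecting version N applies the property lists of versions 2..N, in order, on top of the version 1 base. The sketch below is standalone, not QEMU code; its types are invented for the example and only a subset of the Skylake-Server props is shown.

/* Standalone sketch, not QEMU code: cumulative application of version props. */
#include <stdio.h>

struct example_version {
    int version;
    const char *const *props;   /* NULL-terminated "name=value" strings */
};

static const char *const v2_props[] = { "clflushopt=off", "spec-ctrl=on", NULL };
static const char *const v3_props[] = { "hle=off", "rtm=off", NULL };

static const struct example_version skylake_server_example[] = {
    { 1, NULL },
    { 2, v2_props },
    { 3, v3_props },
    { 0, NULL },                /* end of list */
};

int main(void)
{
    int requested = 3;

    for (const struct example_version *v = skylake_server_example;
         v->version; v++) {
        if (v->version > requested) {
            break;
        }
        for (const char *const *p = v->props; p && *p; p++) {
            printf("v%d: apply %s\n", v->version, *p);
        }
    }
    return 0;
}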
|
2018-09-19 05:11:22 +02:00
|
|
|
{
|
|
|
|
.name = "Cascadelake-Server",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 85,
|
2018-12-27 03:43:03 +01:00
|
|
|
.stepping = 6,
|
2018-09-19 05:11:22 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
|
|
|
|
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
|
|
|
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
|
2018-12-20 13:11:00 +01:00
|
|
|
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
|
2018-09-19 05:11:22 +02:00
|
|
|
CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
|
|
|
|
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
|
2018-12-21 12:35:56 +01:00
|
|
|
CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
|
2018-09-19 05:11:22 +02:00
|
|
|
.features[FEAT_7_0_ECX] =
|
2019-03-19 21:05:15 +01:00
|
|
|
CPUID_7_0_ECX_PKU |
|
2018-09-19 05:11:22 +02:00
|
|
|
CPUID_7_0_ECX_AVX512VNNI,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
|
|
|
CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 5 */
|
2018-09-19 05:11:22 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-11-20 18:37:53 +01:00
|
|
|
/* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
|
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
2019-11-25 19:12:16 +01:00
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
2018-09-19 05:11:22 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Xeon Processor (Cascadelake)",
|
2019-06-28 02:28:44 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{ .version = 2,
|
2020-03-24 06:10:34 +01:00
|
|
|
.note = "ARCH_CAPABILITIES",
|
2019-06-28 02:28:44 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "arch-capabilities", "on" },
|
|
|
|
{ "rdctl-no", "on" },
|
|
|
|
{ "ibrs-all", "on" },
|
|
|
|
{ "skip-l1dfl-vmentry", "on" },
|
|
|
|
{ "mds-no", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2019-11-20 17:49:11 +01:00
|
|
|
{ .version = 3,
|
2019-11-20 17:49:12 +01:00
|
|
|
.alias = "Cascadelake-Server-noTSX",
|
2020-03-24 06:10:34 +01:00
|
|
|
.note = "ARCH_CAPABILITIES, no TSX",
|
2019-11-20 17:49:11 +01:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2020-07-14 10:41:48 +02:00
|
|
|
{ .version = 4,
|
|
|
|
.note = "ARCH_CAPABILITIES, no TSX",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "vmx-eptp-switching", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2021-04-12 09:39:52 +02:00
|
|
|
{ .version = 5,
|
|
|
|
.note = "ARCH_CAPABILITIES, EPT switching, XSAVES, no TSX",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2019-06-28 02:28:44 +02:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2018-09-19 05:11:22 +02:00
|
|
|
},
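The versioned entries above layer property overrides on top of the base model: each version carries a sentinel-terminated PropValue list, and the notes (version 3 of Cascadelake-Server is "ARCH_CAPABILITIES, no TSX" while its list only turns off hle/rtm) show that the lists are applied cumulatively up to the requested version. The following is only a minimal sketch of that pattern with stand-in types and a hypothetical helper name, not QEMU's actual internals:

/*
 * Minimal sketch, not QEMU's implementation: stand-in types and a
 * hypothetical sketch_apply_version() showing how per-version,
 * sentinel-terminated property lists can be applied cumulatively.
 */
typedef struct SketchPropValue {
    const char *prop;
    const char *value;
} SketchPropValue;

typedef struct SketchVersionDef {
    int version;                      /* 0 terminates the list */
    const SketchPropValue *props;     /* NULL or { NULL }-terminated */
} SketchVersionDef;

static void sketch_apply_version(const SketchVersionDef *vdefs, int version,
                                 void (*set)(const char *, const char *))
{
    for (const SketchVersionDef *v = vdefs; v->version != 0; v++) {
        if (v->version > version) {
            break;
        }
        for (const SketchPropValue *pv = v->props; pv && pv->prop; pv++) {
            set(pv->prop, pv->value);  /* e.g. "arch-capabilities" = "on" */
        }
    }
}

Under that assumption, requesting version 3 of the model above would apply both the version-2 arch-capabilities properties and the version-3 hle/rtm overrides.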
|
2019-10-22 09:35:28 +02:00
|
|
|
{
|
|
|
|
.name = "Cooperlake",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 85,
|
|
|
|
.stepping = 10,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
|
|
|
|
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
|
|
|
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
|
|
|
|
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
|
|
|
|
CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
|
|
|
|
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
|
|
|
|
CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
|
|
|
|
.features[FEAT_7_0_ECX] =
|
|
|
|
CPUID_7_0_ECX_PKU |
|
|
|
|
CPUID_7_0_ECX_AVX512VNNI,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
|
|
|
CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
|
|
|
|
CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
|
|
|
|
.features[FEAT_ARCH_CAPABILITIES] =
|
|
|
|
MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
|
2019-12-25 07:30:18 +01:00
|
|
|
MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
|
|
|
|
MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
|
2019-10-22 09:35:28 +02:00
|
|
|
.features[FEAT_7_1_EAX] =
|
2021-08-20 07:46:11 +02:00
|
|
|
CPUID_7_1_EAX_AVX512_BF16,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 2 */
|
2019-10-22 09:35:28 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-12-25 07:30:18 +01:00
|
|
|
/* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
|
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
|
|
|
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
2019-10-22 09:35:28 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Xeon Processor (Cooperlake)",
|
2021-04-12 09:39:52 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{ .version = 2,
|
|
|
|
.note = "XSAVES",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2019-10-22 09:35:28 +02:00
|
|
|
},
|
i386: Add new CPU model Icelake-{Server,Client}
The new CPU models mostly inherit features from their ancestor Skylake, while adding new
features: UMIP, new instructions (PCONFIG (server only), WBNOINVD,
AVX512_VBMI2, GFNI, AVX512_VNNI, VPCLMULQDQ, VAES, AVX512_BITALG),
Intel PT and 5-level paging (server only), as well as
IA32_PRED_CMD and SSBD support for speculative-execution
side-channel mitigations.
Note:
For 5-level paging, the guest physical address width can be configured with the
parameter "phys-bits". Unless it is explicitly specified, we still use the default
value, even for the Icelake-Server CPU model.
At present, hold off on exposing IA32_ARCH_CAPABILITIES to the guest, as 1) this MSR
actually presents more than one 'feature', and maintainers are considering expanding the
current feature presentation from CPUID bits only to MSR bits; 2) a reasonable default
value for MSR_IA32_ARCH_CAPABILITIES needs to be settled first. These two items are
actually beyond the Icelake CPU model itself but fundamental, so split this work apart
and do it later.
https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg00774.html
https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg00796.html
Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
Message-Id: <1530781798-183214-6-git-send-email-robert.hu@linux.intel.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-07-05 11:09:58 +02:00
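The commit message above mentions 5-level paging; in the table that follows this shows up as CPUID_7_0_ECX_LA57 inside the FEAT_7_0_ECX word. As a hedged illustration only (the helper below is hypothetical and assumes the integer/bool types already pulled in by this file), querying a model definition is just a mask test on the per-word feature array:

/*
 * Sketch only, not a QEMU helper: each CPUID/MSR word in the model tables is
 * an OR of bit macros, so feature presence is a simple mask test.
 */
static inline bool sketch_model_has(const uint64_t *feature_words,
                                    int word, uint64_t mask)
{
    return (feature_words[word] & mask) == mask;
}

/* e.g. sketch_model_has(def->features, FEAT_7_0_ECX, CPUID_7_0_ECX_LA57) */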
|
|
|
{
|
|
|
|
.name = "Icelake-Server",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 134,
|
|
|
|
.stepping = 0,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
|
|
|
|
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_8000_0008_EBX] =
|
|
|
|
CPUID_8000_0008_EBX_WBNOINVD,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
|
|
|
|
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
|
|
|
|
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
|
2018-12-20 13:11:00 +01:00
|
|
|
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
|
2018-07-05 11:09:58 +02:00
|
|
|
CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
|
|
|
|
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
|
2018-12-21 12:35:56 +01:00
|
|
|
CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
|
2018-07-05 11:09:58 +02:00
|
|
|
.features[FEAT_7_0_ECX] =
|
2019-09-26 04:10:55 +02:00
|
|
|
CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
|
|
|
|
CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
|
2018-07-05 11:09:58 +02:00
|
|
|
CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
|
|
|
|
CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
|
|
|
|
CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
2018-12-19 14:44:40 +01:00
|
|
|
CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 5 */
|
2018-07-05 11:09:58 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
target/i386: add VMX features to named CPU models
This allows using "-cpu Haswell,+vmx", which we did not really want to
support in QEMU but was produced by Libvirt when using the "host-model"
CPU model. Without this patch, no VMX feature is _actually_ supported
(only the basic instruction set extensions are) and KVM fails to load
in the guest.
This was produced from the output of scripts/kvm/vmxcap using the following
very ugly Python script:
bits = {
'INS/OUTS instruction information': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_INS_OUTS'],
'IA32_VMX_TRUE_*_CTLS support': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_TRUE_CTLS'],
'External interrupt exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_EXT_INTR_MASK'],
'NMI exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_NMI_EXITING'],
'Virtual NMIs': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VIRTUAL_NMIS'],
'Activate VMX-preemption timer': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VMX_PREEMPTION_TIMER'],
'Process posted interrupts': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_POSTED_INTR'],
'Interrupt window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_INTR_PENDING'],
'Use TSC offsetting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_TSC_OFFSETING'],
'HLT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_HLT_EXITING'],
'INVLPG exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_INVLPG_EXITING'],
'MWAIT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MWAIT_EXITING'],
'RDPMC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDPMC_EXITING'],
'RDTSC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDTSC_EXITING'],
'CR3-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_LOAD_EXITING'],
'CR3-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_STORE_EXITING'],
'CR8-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_LOAD_EXITING'],
'CR8-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_STORE_EXITING'],
'Use TPR shadow': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_TPR_SHADOW'],
'NMI-window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_NMI_PENDING'],
'MOV-DR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MOV_DR_EXITING'],
'Unconditional I/O exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_UNCOND_IO_EXITING'],
'Use I/O bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_IO_BITMAPS'],
'Monitor trap flag': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_TRAP_FLAG'],
'Use MSR bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_MSR_BITMAPS'],
'MONITOR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_EXITING'],
'PAUSE exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_PAUSE_EXITING'],
'Activate secondary control': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS'],
'Virtualize APIC accesses': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES'],
'Enable EPT': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_EPT'],
'Descriptor-table exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_DESC'],
'Enable RDTSCP': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDTSCP'],
'Virtualize x2APIC mode': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE'],
'Enable VPID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VPID'],
'WBINVD exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_WBINVD_EXITING'],
'Unrestricted guest': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST'],
'APIC register emulation': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT'],
'Virtual interrupt delivery': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY'],
'PAUSE-loop exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING'],
'RDRAND exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDRAND_EXITING'],
'Enable INVPCID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_INVPCID'],
'Enable VM functions': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VMFUNC'],
'VMCS shadowing': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_SHADOW_VMCS'],
'RDSEED exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDSEED_EXITING'],
'Enable PML': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_PML'],
'Enable XSAVES/XRSTORS': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_XSAVES'],
'Save debug controls': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_DEBUG_CONTROLS'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Acknowledge interrupt on exit': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_ACK_INTR_ON_EXIT'],
'Save IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_PAT'],
'Load IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PAT'],
'Save IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_EFER'],
'Load IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_EFER'],
'Save VMX-preemption timer value': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER'],
'Clear IA32_BNDCFGS': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_CLEAR_BNDCFGS'],
'Load debug controls': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS'],
'IA-32e mode guest': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_IA32E_MODE'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Load IA32_PAT': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PAT'],
'Load IA32_EFER': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_EFER'],
'Load IA32_BNDCFGS': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_BNDCFGS'],
'Store EFER.LMA into IA-32e mode guest control': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_STORE_LMA'],
'HLT activity state': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ACTIVITY_HLT'],
'VMWRITE to VM-exit information fields': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_VMWRITE_VMEXIT'],
'Inject event with insn length=0': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ZERO_LEN_INJECT'],
'Execute-only EPT translations': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_EXECONLY'],
'Page-walk length 4': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_PAGE_WALK_LENGTH_4'],
'Paging-structure memory type WB': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_WB'],
'2MB EPT pages': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB'],
'INVEPT supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT'],
'EPT accessed and dirty flags': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_AD_BITS'],
'Single-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT'],
'All-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_ALL_CONTEXT'],
'INVVPID supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID'],
'Individual-address INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_ADDR'],
'Single-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT'],
'All-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_ALL_CONTEXT'],
'Single-context-retaining-globals INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS'],
'EPTP Switching': ['FEAT_VMX_VMFUNC', 'MSR_VMX_VMFUNC_EPT_SWITCHING']
}
import sys
import textwrap
out = {}
for l in sys.stdin.readlines():
    l = l.rstrip()
    if l.endswith('!!'):
        l = l[:-2].rstrip()
    if l.startswith(' ') and (l.endswith('default') or l.endswith('yes')):
        l = l[4:]
        for key, value in bits.items():
            if l.startswith(key):
                ctl, bit = value
                if ctl in out:
                    out[ctl] = out[ctl] + ' | '
                else:
                    out[ctl] = ' [%s] = ' % ctl
                out[ctl] = out[ctl] + bit
for x in sorted(out.keys()):
    print("\n ".join(textwrap.wrap(out[x] + ",")))
Note that the script has a bug in that some keys apply to both VM entry
and VM exit controls ("load IA32_PERF_GLOBAL_CTRL", "load IA32_EFER",
"load IA32_PAT"). Those have to be fixed by hand.
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-11-20 18:37:53 +01:00
|
|
|
/* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
|
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
|
2018-07-05 11:09:58 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Xeon Processor (Icelake)",
|
2019-11-20 17:49:11 +01:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2020-03-24 06:10:34 +01:00
|
|
|
.note = "no TSX",
|
2019-11-20 17:49:12 +01:00
|
|
|
.alias = "Icelake-Server-noTSX",
|
2019-11-20 17:49:11 +01:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "hle", "off" },
|
|
|
|
{ "rtm", "off" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2020-03-16 10:56:05 +01:00
|
|
|
{
|
|
|
|
.version = 3,
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "arch-capabilities", "on" },
|
|
|
|
{ "rdctl-no", "on" },
|
|
|
|
{ "ibrs-all", "on" },
|
|
|
|
{ "skip-l1dfl-vmentry", "on" },
|
|
|
|
{ "mds-no", "on" },
|
|
|
|
{ "pschange-mc-no", "on" },
|
|
|
|
{ "taa-no", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2020-07-14 10:41:47 +02:00
|
|
|
{
|
|
|
|
.version = 4,
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "sha-ni", "on" },
|
|
|
|
{ "avx512ifma", "on" },
|
|
|
|
{ "rdpid", "on" },
|
|
|
|
{ "fsrm", "on" },
|
|
|
|
{ "vmx-rdseed-exit", "on" },
|
|
|
|
{ "vmx-pml", "on" },
|
|
|
|
{ "vmx-eptp-switching", "on" },
|
|
|
|
{ "model", "106" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2021-04-12 09:39:52 +02:00
|
|
|
{
|
|
|
|
.version = 5,
|
|
|
|
.note = "XSAVES",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2022-02-21 15:53:15 +01:00
|
|
|
{
|
|
|
|
.version = 6,
|
|
|
|
.note = "5-level EPT",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "vmx-page-walk-5", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
2019-11-20 17:49:11 +01:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2018-07-05 11:09:58 +02:00
|
|
|
},
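Every FEAT_VMX_PROCBASED_CTLS value in the models above includes VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, which is what makes the FEAT_VMX_SECONDARY_CTLS word meaningful at all: architecturally, the secondary VM-execution controls only take effect when that bit is set in the primary controls. A minimal sketch of that dependency follows; the helper name is hypothetical, while the macro is the one used in the tables above.

/*
 * Sketch, not QEMU code: secondary processor-based VM-execution controls are
 * only effective when the primary controls activate them.
 */
static inline uint64_t sketch_effective_secondary_ctls(uint64_t procbased_ctls,
                                                       uint64_t secondary_ctls)
{
    if (!(procbased_ctls & VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
        return 0;
    }
    return secondary_ctls;
}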
|
2019-07-18 09:34:05 +02:00
|
|
|
{
|
|
|
|
.name = "Denverton",
|
|
|
|
.level = 21,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 95,
|
|
|
|
.stepping = 1,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
|
|
|
|
CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
|
|
|
|
CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
|
|
|
|
CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
|
|
|
|
CPUID_SSE | CPUID_SSE2,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
|
|
|
|
CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
|
|
|
|
CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
|
|
|
|
CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
|
|
|
|
CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
|
|
|
|
CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
|
|
|
CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
|
|
|
|
CPUID_7_0_EDX_SPEC_CTRL_SSBD,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 3 */
|
2019-07-18 09:34:05 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
|
|
|
.features[FEAT_ARCH_CAPABILITIES] =
|
|
|
|
MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
|
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
|
|
|
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
2019-07-18 09:34:05 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Atom Processor (Denverton)",
|
2020-02-12 09:13:25 +01:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2020-03-24 06:10:34 +01:00
|
|
|
.note = "no MPX, no MONITOR",
|
2020-02-12 09:13:25 +01:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "monitor", "off" },
|
|
|
|
{ "mpx", "off" },
|
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
|
|
|
},
|
2021-04-12 09:39:52 +02:00
|
|
|
{
|
|
|
|
.version = 3,
|
|
|
|
.note = "XSAVES, no MPX, no MONITOR",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
|
|
|
},
|
2020-02-12 09:13:25 +01:00
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
2019-07-18 09:34:05 +02:00
|
|
|
},
|
2019-06-26 18:21:29 +02:00
|
|
|
{
|
2019-07-16 17:58:08 +02:00
|
|
|
.name = "Snowridge",
|
2019-06-26 18:21:29 +02:00
|
|
|
.level = 27,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 134,
|
|
|
|
.stepping = 1,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
/* missing: CPUID_PN CPUID_IA64 */
|
|
|
|
/* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
|
|
|
|
CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
|
|
|
|
CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
|
|
|
|
CPUID_CX8 | CPUID_APIC | CPUID_SEP |
|
|
|
|
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
|
|
|
|
CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
|
|
|
|
CPUID_MMX |
|
|
|
|
CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
|
|
|
|
CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_CX16 |
|
|
|
|
CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
|
|
|
|
CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_SYSCALL |
|
|
|
|
CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
|
|
|
|
CPUID_EXT2_LM,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_LAHF_LM |
|
|
|
|
CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE |
|
|
|
|
CPUID_7_0_EBX_SMEP |
|
|
|
|
CPUID_7_0_EBX_ERMS |
|
|
|
|
CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
|
|
|
|
CPUID_7_0_EBX_RDSEED |
|
|
|
|
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
|
|
|
|
CPUID_7_0_EBX_CLWB |
|
|
|
|
CPUID_7_0_EBX_SHA_NI,
|
|
|
|
.features[FEAT_7_0_ECX] =
|
|
|
|
CPUID_7_0_ECX_UMIP |
|
|
|
|
/* missing bit 5 */
|
|
|
|
CPUID_7_0_ECX_GFNI |
|
|
|
|
CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
|
|
|
|
CPUID_7_0_ECX_MOVDIR64B,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
|
|
|
CPUID_7_0_EDX_SPEC_CTRL |
|
|
|
|
CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
|
|
|
|
CPUID_7_0_EDX_CORE_CAPABILITY,
|
|
|
|
.features[FEAT_CORE_CAPABILITY] =
|
|
|
|
MSR_CORE_CAP_SPLIT_LOCK_DETECT,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 3 */
|
2019-06-26 18:21:29 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
target/i386: add VMX features to named CPU models
This allows using "-cpu Haswell,+vmx", which we did not really want to
support in QEMU but was produced by Libvirt when using the "host-model"
CPU model. Without this patch, no VMX feature is _actually_ supported
(only the basic instruction set extensions are) and KVM fails to load
in the guest.
This was produced from the output of scripts/kvm/vmxcap using the following
very ugly Python script:
bits = {
'INS/OUTS instruction information': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_INS_OUTS'],
'IA32_VMX_TRUE_*_CTLS support': ['FEAT_VMX_BASIC', 'MSR_VMX_BASIC_TRUE_CTLS'],
'External interrupt exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_EXT_INTR_MASK'],
'NMI exiting': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_NMI_EXITING'],
'Virtual NMIs': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VIRTUAL_NMIS'],
'Activate VMX-preemption timer': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_VMX_PREEMPTION_TIMER'],
'Process posted interrupts': ['FEAT_VMX_PINBASED_CTLS', 'VMX_PIN_BASED_POSTED_INTR'],
'Interrupt window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_INTR_PENDING'],
'Use TSC offsetting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_TSC_OFFSETING'],
'HLT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_HLT_EXITING'],
'INVLPG exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_INVLPG_EXITING'],
'MWAIT exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MWAIT_EXITING'],
'RDPMC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDPMC_EXITING'],
'RDTSC exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_RDTSC_EXITING'],
'CR3-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_LOAD_EXITING'],
'CR3-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR3_STORE_EXITING'],
'CR8-load exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_LOAD_EXITING'],
'CR8-store exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_CR8_STORE_EXITING'],
'Use TPR shadow': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_TPR_SHADOW'],
'NMI-window exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_VIRTUAL_NMI_PENDING'],
'MOV-DR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MOV_DR_EXITING'],
'Unconditional I/O exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_UNCOND_IO_EXITING'],
'Use I/O bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_IO_BITMAPS'],
'Monitor trap flag': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_TRAP_FLAG'],
'Use MSR bitmaps': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_USE_MSR_BITMAPS'],
'MONITOR exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_MONITOR_EXITING'],
'PAUSE exiting': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_PAUSE_EXITING'],
'Activate secondary control': ['FEAT_VMX_PROCBASED_CTLS', 'VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS'],
'Virtualize APIC accesses': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES'],
'Enable EPT': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_EPT'],
'Descriptor-table exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_DESC'],
'Enable RDTSCP': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDTSCP'],
'Virtualize x2APIC mode': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE'],
'Enable VPID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VPID'],
'WBINVD exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_WBINVD_EXITING'],
'Unrestricted guest': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST'],
'APIC register emulation': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT'],
'Virtual interrupt delivery': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY'],
'PAUSE-loop exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING'],
'RDRAND exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDRAND_EXITING'],
'Enable INVPCID': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_INVPCID'],
'Enable VM functions': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_VMFUNC'],
'VMCS shadowing': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_SHADOW_VMCS'],
'RDSEED exiting': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_RDSEED_EXITING'],
'Enable PML': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_ENABLE_PML'],
'Enable XSAVES/XRSTORS': ['FEAT_VMX_SECONDARY_CTLS', 'VMX_SECONDARY_EXEC_XSAVES'],
'Save debug controls': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_DEBUG_CONTROLS'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Acknowledge interrupt on exit': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_ACK_INTR_ON_EXIT'],
'Save IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_PAT'],
'Load IA32_PAT': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_PAT'],
'Save IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_IA32_EFER'],
'Load IA32_EFER': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_LOAD_IA32_EFER'],
'Save VMX-preemption timer value': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER'],
'Clear IA32_BNDCFGS': ['FEAT_VMX_EXIT_CTLS', 'VMX_VM_EXIT_CLEAR_BNDCFGS'],
'Load debug controls': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS'],
'IA-32e mode guest': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_IA32E_MODE'],
'Load IA32_PERF_GLOBAL_CTRL': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL'],
'Load IA32_PAT': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_PAT'],
'Load IA32_EFER': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_IA32_EFER'],
'Load IA32_BNDCFGS': ['FEAT_VMX_ENTRY_CTLS', 'VMX_VM_ENTRY_LOAD_BNDCFGS'],
'Store EFER.LMA into IA-32e mode guest control': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_STORE_LMA'],
'HLT activity state': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ACTIVITY_HLT'],
'VMWRITE to VM-exit information fields': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_VMWRITE_VMEXIT'],
'Inject event with insn length=0': ['FEAT_VMX_MISC', 'MSR_VMX_MISC_ZERO_LEN_INJECT'],
'Execute-only EPT translations': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_EXECONLY'],
'Page-walk length 4': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_PAGE_WALK_LENGTH_4'],
'Paging-structure memory type WB': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_WB'],
'2MB EPT pages': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB'],
'INVEPT supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT'],
'EPT accessed and dirty flags': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_AD_BITS'],
'Single-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT'],
'All-context INVEPT': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVEPT_ALL_CONTEXT'],
'INVVPID supported': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID'],
'Individual-address INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_ADDR'],
'Single-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT'],
'All-context INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_ALL_CONTEXT'],
'Single-context-retaining-globals INVVPID': ['FEAT_VMX_EPT_VPID_CAPS', 'MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS'],
'EPTP Switching': ['FEAT_VMX_VMFUNC', 'MSR_VMX_VMFUNC_EPT_SWITCHING']
}
import sys
import textwrap
out = {}
for l in sys.stdin.readlines():
    l = l.rstrip()
    if l.endswith('!!'):
        l = l[:-2].rstrip()
    if l.startswith(' ') and (l.endswith('default') or l.endswith('yes')):
        l = l[4:]
        for key, value in bits.items():
            if l.startswith(key):
                ctl, bit = value
                if ctl in out:
                    out[ctl] = out[ctl] + ' | '
                else:
                    out[ctl] = ' [%s] = ' % ctl
                out[ctl] = out[ctl] + bit
for x in sorted(out.keys()):
    print("\n ".join(textwrap.wrap(out[x] + ",")))
Note that the script has a bug: some keys apply to both the VM-entry
and VM-exit controls ("load IA32_PERF_GLOBAL_CTRL", "load IA32_EFER",
"load IA32_PAT"), but a Python dict keeps only the last entry for a
duplicated key, so those controls have to be fixed up by hand.
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-11-20 18:37:53 +01:00
|
|
|
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
|
|
|
|
MSR_VMX_BASIC_TRUE_CTLS,
|
|
|
|
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
|
|
|
|
VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
|
|
|
|
VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
|
|
|
|
.features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
|
|
|
|
MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
|
|
|
|
MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
|
|
|
|
MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
|
|
|
|
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
|
|
|
|
.features[FEAT_VMX_EXIT_CTLS] =
|
|
|
|
VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
|
|
|
|
VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
|
|
|
|
.features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
|
|
|
|
MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
|
|
|
|
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
|
|
|
|
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
|
|
|
|
.features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
|
|
|
|
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
|
|
|
|
VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
|
|
|
|
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
|
|
|
|
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
|
|
|
|
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
|
|
|
|
VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
|
|
|
|
VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
VMX_CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
|
|
|
|
.features[FEAT_VMX_SECONDARY_CTLS] =
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
|
|
|
|
VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
|
|
|
|
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
|
VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
|
|
|
|
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
|
2019-06-26 18:21:29 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Atom Processor (SnowRidge)",
|
2019-10-12 04:47:48 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "mpx", "off" },
|
|
|
|
{ "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
|
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
|
|
|
},
|
2021-04-12 09:39:52 +02:00
|
|
|
{
|
|
|
|
.version = 3,
|
|
|
|
.note = "XSAVES, no MPX",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "vmx-xsaves", "on" },
|
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
|
|
|
},
|
2021-06-30 03:20:53 +02:00
|
|
|
{
|
|
|
|
.version = 4,
|
2021-08-27 08:48:18 +02:00
|
|
|
.note = "no split lock detect, no core-capability",
|
2021-06-30 03:20:53 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "split-lock-detect", "off" },
|
2021-08-27 08:48:18 +02:00
|
|
|
{ "core-capability", "off" },
|
2021-06-30 03:20:53 +02:00
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
|
|
|
},
|
2019-10-12 04:47:48 +02:00
|
|
|
{ /* end of list */ },
|
|
|
|
},
|
2019-06-26 18:21:29 +02:00
|
|
|
},
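    /*
     * Note on the .versions array above: each X86CPUVersionDefinition
     * carries a PropValue list that is applied on top of the base model
     * definition, so SnowRidge-v2, for instance, is the base model with
     * "mpx" turned off and an adjusted model-id.
     */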
|
2018-03-20 01:08:15 +01:00
|
|
|
{
|
|
|
|
.name = "KnightsMill",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_INTEL,
|
|
|
|
.family = 6,
|
|
|
|
.model = 133,
|
|
|
|
.stepping = 0,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
|
|
|
|
CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
|
|
|
|
CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
|
|
|
|
CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
|
|
|
|
CPUID_PSE | CPUID_DE | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
|
|
|
|
CPUID_EXT_F16C | CPUID_EXT_RDRAND,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
|
|
|
|
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
|
|
|
|
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
|
|
|
|
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
|
|
|
|
CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
|
|
|
|
CPUID_7_0_EBX_AVX512ER,
|
|
|
|
.features[FEAT_7_0_ECX] =
|
|
|
|
CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
|
|
|
CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
|
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "Intel Xeon Phi Processor (Knights Mill)",
|
|
|
|
},
|
2012-09-05 22:41:10 +02:00
|
|
|
{
|
|
|
|
.name = "Opteron_G1",
|
|
|
|
.level = 5,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 15,
|
|
|
|
.model = 6,
|
|
|
|
.stepping = 1,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2017-01-13 20:00:57 +01:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
2012-09-05 22:41:10 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "Opteron_G2",
|
|
|
|
.level = 5,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 15,
|
|
|
|
.model = 6,
|
|
|
|
.stepping = 1,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2017-01-13 20:00:57 +01:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
|
2012-09-05 22:41:10 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "Opteron_G3",
|
|
|
|
.level = 5,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
2016-10-03 13:50:02 +02:00
|
|
|
.family = 16,
|
|
|
|
.model = 2,
|
|
|
|
.stepping = 3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2018-12-20 13:07:32 +01:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
|
|
|
|
CPUID_EXT2_RDTSCP,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
|
2012-09-05 22:41:10 +02:00
|
|
|
.xlevel = 0x80000008,
|
|
|
|
.model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "Opteron_G4",
|
|
|
|
.level = 0xd,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
2012-09-05 22:41:10 +02:00
|
|
|
.family = 21,
|
|
|
|
.model = 1,
|
|
|
|
.stepping = 2,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
|
|
|
|
CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2017-01-13 20:00:57 +01:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
|
2018-12-20 13:07:32 +01:00
|
|
|
CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
|
|
|
|
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
|
|
|
|
CPUID_EXT3_LAHF_LM,
|
2019-01-21 16:50:51 +01:00
|
|
|
.features[FEAT_SVM] =
|
|
|
|
CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
|
2014-11-24 15:54:43 +01:00
|
|
|
/* no xsaveopt! */
|
2012-09-05 22:41:10 +02:00
|
|
|
.xlevel = 0x8000001A,
|
|
|
|
.model_id = "AMD Opteron 62xx class CPU",
|
|
|
|
},
|
2012-11-14 19:28:53 +01:00
|
|
|
{
|
|
|
|
.name = "Opteron_G5",
|
|
|
|
.level = 0xd,
|
2013-01-21 15:06:36 +01:00
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
2012-11-14 19:28:53 +01:00
|
|
|
.family = 21,
|
|
|
|
.model = 2,
|
|
|
|
.stepping = 0,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_EDX] =
|
2014-12-10 17:12:41 +01:00
|
|
|
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
|
|
|
|
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
|
|
|
|
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
|
|
|
|
CPUID_DE | CPUID_FP87,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_1_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
|
|
|
|
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
|
|
|
|
CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_EDX] =
|
2017-01-13 20:00:57 +01:00
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
|
2018-12-20 13:07:32 +01:00
|
|
|
CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
|
2013-04-22 21:00:15 +02:00
|
|
|
.features[FEAT_8000_0001_ECX] =
|
2013-04-22 21:00:14 +02:00
|
|
|
CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
|
2014-06-18 01:05:29 +02:00
|
|
|
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
|
|
|
|
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
|
|
|
|
CPUID_EXT3_LAHF_LM,
|
2019-01-21 16:50:51 +01:00
|
|
|
.features[FEAT_SVM] =
|
|
|
|
CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
|
2014-11-24 15:54:43 +01:00
|
|
|
/* no xsaveopt! */
|
2012-11-14 19:28:53 +01:00
|
|
|
.xlevel = 0x8000001A,
|
|
|
|
.model_id = "AMD Opteron 63xx class CPU",
|
|
|
|
},
|
target-i386/cpu: Add new EPYC CPU model
Add a new base CPU model called 'EPYC' to model processors from AMD EPYC
family (which includes EPYC 76xx,75xx,74xx, 73xx and 72xx).
The following feature bits have been added/removed compared to Opteron_G5:
Added: monitor, movbe, rdrand, mmxext, ffxsr, rdtscp, cr8legacy, osvw,
fsgsbase, bmi1, avx2, smep, bmi2, rdseed, adx, smap, clflushopt, sha,
xsaveopt, xsavec, xgetbv1, arat
Removed: xop, fma4, tbm
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Tom Lendacky <Thomas.Lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Message-Id: <20170815170051.127257-1-brijesh.singh@amd.com>
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2017-08-15 19:00:51 +02:00
|
|
|
{
|
|
|
|
.name = "EPYC",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
|
|
|
.family = 23,
|
|
|
|
.model = 1,
|
|
|
|
.stepping = 2,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
|
|
|
|
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
|
|
|
|
CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
|
|
|
|
CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
|
|
|
|
CPUID_VME | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
|
|
|
|
CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
|
|
|
|
CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
|
|
|
|
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
|
2018-06-19 23:31:58 +02:00
|
|
|
CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
|
|
|
|
CPUID_EXT3_TOPOEXT,
|
2017-08-15 19:00:51 +02:00
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
|
|
|
|
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
|
|
|
|
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
|
|
|
|
CPUID_7_0_EBX_SHA_NI,
|
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
2019-01-21 16:50:51 +01:00
|
|
|
.features[FEAT_SVM] =
|
|
|
|
CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
|
2018-06-19 23:31:58 +02:00
|
|
|
.xlevel = 0x8000001E,
|
2017-08-15 19:00:51 +02:00
|
|
|
.model_id = "AMD EPYC Processor",
|
2018-05-10 22:41:44 +02:00
|
|
|
.cache_info = &epyc_cache_info,
|
2019-06-28 02:28:40 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
2019-06-28 02:28:41 +02:00
|
|
|
.alias = "EPYC-IBPB",
|
2019-06-28 02:28:40 +02:00
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "ibpb", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"AMD EPYC Processor (with IBPB)" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2019-11-07 19:00:57 +01:00
|
|
|
{
|
|
|
|
.version = 3,
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "ibpb", "on" },
|
|
|
|
{ "perfctr-core", "on" },
|
|
|
|
{ "clzero", "on" },
|
|
|
|
{ "xsaveerptr", "on" },
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ "model-id",
|
|
|
|
"AMD EPYC Processor" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
2019-06-28 02:28:40 +02:00
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2017-08-15 19:00:51 +02:00
|
|
|
},
|
2019-04-16 14:06:13 +02:00
|
|
|
{
|
|
|
|
.name = "Dhyana",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_HYGON,
|
|
|
|
.family = 24,
|
|
|
|
.model = 0,
|
|
|
|
.stepping = 1,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
|
|
|
|
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
|
|
|
|
CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
|
|
|
|
CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
|
|
|
|
CPUID_VME | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
|
|
|
|
CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
|
|
|
|
CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
|
|
|
|
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
|
|
|
|
CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
|
|
|
|
CPUID_EXT3_TOPOEXT,
|
|
|
|
.features[FEAT_8000_0008_EBX] =
|
|
|
|
CPUID_8000_0008_EBX_IBPB,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
|
|
|
|
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
|
|
|
|
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
|
2021-04-12 09:39:52 +02:00
|
|
|
/* XSAVES is added in version 2 */
|
2019-04-16 14:06:13 +02:00
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
|
|
|
.features[FEAT_SVM] =
|
|
|
|
CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
|
|
|
|
.xlevel = 0x8000001E,
|
|
|
|
.model_id = "Hygon Dhyana Processor",
|
|
|
|
.cache_info = &epyc_cache_info,
|
2021-04-12 09:39:52 +02:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{ .version = 2,
|
|
|
|
.note = "XSAVES",
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "xsaves", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2019-04-16 14:06:13 +02:00
|
|
|
},
|
2019-11-07 19:01:04 +01:00
|
|
|
{
|
|
|
|
.name = "EPYC-Rome",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
|
|
|
.family = 23,
|
|
|
|
.model = 49,
|
|
|
|
.stepping = 0,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
|
|
|
|
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
|
|
|
|
CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
|
|
|
|
CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
|
|
|
|
CPUID_VME | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
|
|
|
|
CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
|
|
|
|
CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
|
|
|
|
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
|
|
|
|
CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
|
|
|
|
CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
|
|
|
|
.features[FEAT_8000_0008_EBX] =
|
|
|
|
CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
|
|
|
|
CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
|
|
|
|
CPUID_8000_0008_EBX_STIBP,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
|
|
|
|
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
|
|
|
|
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
|
|
|
|
CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB,
|
|
|
|
.features[FEAT_7_0_ECX] =
|
|
|
|
CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID,
|
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
|
|
|
.features[FEAT_SVM] =
|
|
|
|
CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
|
|
|
|
.xlevel = 0x8000001E,
|
|
|
|
.model_id = "AMD EPYC-Rome Processor",
|
|
|
|
.cache_info = &epyc_rome_cache_info,
|
2021-03-03 16:45:30 +01:00
|
|
|
.versions = (X86CPUVersionDefinition[]) {
|
|
|
|
{ .version = 1 },
|
|
|
|
{
|
|
|
|
.version = 2,
|
|
|
|
.props = (PropValue[]) {
|
|
|
|
{ "ibrs", "on" },
|
|
|
|
{ "amd-ssbd", "on" },
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
|
|
|
},
|
|
|
|
{ /* end of list */ }
|
|
|
|
}
|
2019-11-07 19:01:04 +01:00
|
|
|
},
|
2021-02-09 22:04:05 +01:00
|
|
|
{
|
|
|
|
.name = "EPYC-Milan",
|
|
|
|
.level = 0xd,
|
|
|
|
.vendor = CPUID_VENDOR_AMD,
|
|
|
|
.family = 25,
|
|
|
|
.model = 1,
|
|
|
|
.stepping = 1,
|
|
|
|
.features[FEAT_1_EDX] =
|
|
|
|
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
|
|
|
|
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
|
|
|
|
CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
|
|
|
|
CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
|
|
|
|
CPUID_VME | CPUID_FP87,
|
|
|
|
.features[FEAT_1_ECX] =
|
|
|
|
CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
|
|
|
|
CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
|
|
|
|
CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
|
|
|
|
CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
|
|
|
|
CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
|
|
|
|
CPUID_EXT_PCID,
|
|
|
|
.features[FEAT_8000_0001_EDX] =
|
|
|
|
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
|
|
|
|
CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
|
|
|
|
CPUID_EXT2_SYSCALL,
|
|
|
|
.features[FEAT_8000_0001_ECX] =
|
|
|
|
CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
|
|
|
|
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
|
|
|
|
CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
|
|
|
|
CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
|
|
|
|
.features[FEAT_8000_0008_EBX] =
|
|
|
|
CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
|
|
|
|
CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
|
|
|
|
CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP |
|
|
|
|
CPUID_8000_0008_EBX_AMD_SSBD,
|
|
|
|
.features[FEAT_7_0_EBX] =
|
|
|
|
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
|
|
|
|
CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
|
|
|
|
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
|
|
|
|
CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_ERMS |
|
|
|
|
CPUID_7_0_EBX_INVPCID,
|
|
|
|
.features[FEAT_7_0_ECX] =
|
|
|
|
CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_PKU,
|
|
|
|
.features[FEAT_7_0_EDX] =
|
|
|
|
CPUID_7_0_EDX_FSRM,
|
|
|
|
.features[FEAT_XSAVE] =
|
|
|
|
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
|
|
|
|
CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
|
|
|
|
.features[FEAT_6_EAX] =
|
|
|
|
CPUID_6_EAX_ARAT,
|
|
|
|
.features[FEAT_SVM] =
|
|
|
|
CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE | CPUID_SVM_SVME_ADDR_CHK,
|
|
|
|
.xlevel = 0x8000001E,
|
|
|
|
.model_id = "AMD EPYC-Milan Processor",
|
|
|
|
.cache_info = &epyc_milan_cache_info,
|
|
|
|
},
|
2010-03-11 14:38:55 +01:00
|
|
|
};
|
|
|
|
|
i386: Resolve CPU models to v1 by default
When using `query-cpu-definitions` with `-machine none`,
QEMU is resolving all CPU models to their latest versions. The
actual CPU model version being used by another machine type (e.g.
`pc-q35-4.0`) might be different.
In theory, this was OK because the correct CPU model
version is returned when using the correct `-machine` argument.
Except that in practice, this breaks libvirt expectations:
libvirt always uses `-machine none` when checking if a CPU model
is runnable, because runnability is not expected to be affected
when the machine type is changed.
For example, when running on a Haswell host without TSX,
Haswell-v4 is runnable, but Haswell-v1 is not. On those hosts,
`query-cpu-definitions` says Haswell is runnable if using
`-machine none`, but Haswell is actually not runnable using any
of the `pc-*` machine types (because they resolve Haswell to
Haswell-v1). In other words, we're breaking the "runnability
guarantee" we promised to not break for a few releases (see
qemu-deprecated.texi).
To address this issue, change the default CPU model version to v1
on all machine types, so we make `query-cpu-definitions` output
when using `-machine none` match the results when using `pc-*`.
This will change in the future (the plan is to always return the
latest CPU model version if using `-machine none`), but only
after giving libvirt the opportunity to adapt.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1779078
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20191205223339.764534-1-ehabkost@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2019-12-05 23:33:39 +01:00
|
|
|
/*
|
|
|
|
* We resolve CPU model aliases using -v1 when using "-machine
|
|
|
|
* none", but this is just for compatibility while libvirt isn't
|
|
|
|
* adapted to resolve CPU model versions before creating VMs.
|
2020-09-29 09:58:24 +02:00
|
|
|
* See "Runnability guarantee of CPU models" at
|
2021-07-23 08:58:28 +02:00
|
|
|
* docs/about/deprecated.rst.
|
2019-12-05 23:33:39 +01:00
|
|
|
*/
|
|
|
|
X86CPUVersion default_cpu_version = 1;
|
2019-06-28 02:28:42 +02:00
|
|
|
|
|
|
|
void x86_cpu_set_default_version(X86CPUVersion version)
|
|
|
|
{
|
|
|
|
/* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
|
|
|
|
assert(version != CPU_VERSION_AUTO);
|
|
|
|
default_cpu_version = version;
|
|
|
|
}
|
|
|
|
|
2019-06-28 02:28:39 +02:00
|
|
|
static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
|
|
|
|
{
|
|
|
|
int v = 0;
|
|
|
|
const X86CPUVersionDefinition *vdef =
|
|
|
|
x86_cpu_def_get_versions(model->cpudef);
|
|
|
|
while (vdef->version) {
|
|
|
|
v = vdef->version;
|
|
|
|
vdef++;
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
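
/*
 * The walk above stops at the zero-initialized "end of list" terminator
 * entry of the versions array, whose .version field is 0.
 */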
|
|
|
|
|
|
|
|
/* Return the actual version being used for a specific CPU model */
|
|
|
|
static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
|
|
|
|
{
|
|
|
|
X86CPUVersion v = model->version;
|
2019-06-28 02:28:42 +02:00
|
|
|
if (v == CPU_VERSION_AUTO) {
|
|
|
|
v = default_cpu_version;
|
|
|
|
}
|
2019-06-28 02:28:39 +02:00
|
|
|
if (v == CPU_VERSION_LATEST) {
|
|
|
|
return x86_cpu_model_last_version(model);
|
|
|
|
}
|
|
|
|
return v;
|
|
|
|
}
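
/*
 * Worked example (based on the definitions above): the EPYC model lists
 * versions 1, 2 and 3, so CPU_VERSION_LATEST resolves to 3 for it, while
 * CPU_VERSION_AUTO resolves to default_cpu_version (1 by default) unless
 * a machine type has changed it via x86_cpu_set_default_version().
 */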
|
|
|
|
|
2017-02-22 19:39:17 +01:00
|
|
|
static Property max_x86_cpu_properties[] = {
|
2014-06-17 22:31:53 +02:00
|
|
|
DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
|
2015-09-02 16:19:11 +02:00
|
|
|
DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
|
2014-04-30 18:48:41 +02:00
|
|
|
DEFINE_PROP_END_OF_LIST()
|
|
|
|
};
|
|
|
|
|
2017-02-22 19:39:17 +01:00
|
|
|
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
|
2010-03-11 14:38:55 +01:00
|
|
|
{
|
2014-04-30 18:48:41 +02:00
|
|
|
DeviceClass *dc = DEVICE_CLASS(oc);
|
2014-02-10 11:21:30 +01:00
|
|
|
X86CPUClass *xcc = X86_CPU_CLASS(oc);
|
2010-03-11 14:38:55 +01:00
|
|
|
|
2017-01-19 22:04:45 +01:00
|
|
|
xcc->ordering = 9;
|
2012-10-24 23:44:06 +02:00
|
|
|
|
2016-09-30 20:49:36 +02:00
|
|
|
xcc->model_description =
|
2017-02-22 19:39:17 +01:00
|
|
|
"Enables all features supported by the accelerator in the current host";
|
2014-02-10 11:21:30 +01:00
|
|
|
|
2020-01-10 16:30:32 +01:00
|
|
|
device_class_set_props(dc, max_x86_cpu_properties);
|
2014-02-10 11:21:30 +01:00
|
|
|
}
|
|
|
|
|
2017-02-22 19:39:17 +01:00
|
|
|
static void max_x86_cpu_initfn(Object *obj)
|
2014-02-10 11:21:30 +01:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
|
2014-08-20 22:30:12 +02:00
|
|
|
/* We can't fill the features array here because we don't know yet if
|
|
|
|
* "migratable" is true or false.
|
|
|
|
*/
|
2017-01-19 22:04:46 +01:00
|
|
|
cpu->max_features = true;
|
qom: Put name parameter before value / visitor parameter
The object_property_set_FOO() setters take property name and value in
an unusual order:
void object_property_set_FOO(Object *obj, FOO_TYPE value,
const char *name, Error **errp)
Having to pass value before name feels grating. Swap them.
Same for object_property_set(), object_property_get(), and
object_property_parse().
Convert callers with this Coccinelle script:
@@
identifier fun = {
object_property_get, object_property_parse, object_property_set_str,
object_property_set_link, object_property_set_bool,
object_property_set_int, object_property_set_uint, object_property_set,
object_property_set_qobject
};
expression obj, v, name, errp;
@@
- fun(obj, v, name, errp)
+ fun(obj, name, v, errp)
Chokes on hw/arm/musicpal.c's lcd_refresh() with the unhelpful error
message "no position information". Convert that one manually.
Fails to convert hw/arm/armsse.c, because Coccinelle gets confused by
ARMSSE being used both as typedef and function-like macro there.
Convert manually.
Fails to convert hw/rx/rx-gdbsim.c, because Coccinelle gets confused
by RXCPU being used both as typedef and function-like macro there.
Convert manually. The other files using RXCPU that way don't need
conversion.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20200707160613.848843-27-armbru@redhat.com>
[Straightforward conflict with commit 2336172d9b "audio: set default
value for pcspk.iobase property" resolved]
2020-07-07 18:05:54 +02:00
|
|
|
object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort);
|
2021-03-22 14:27:40 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* these defaults are used for TCG and all other accelerators
|
|
|
|
* besides KVM and HVF, which overwrite these values
|
|
|
|
*/
|
|
|
|
object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD,
|
|
|
|
&error_abort);
|
2021-05-07 15:36:50 +02:00
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
object_property_set_int(OBJECT(cpu), "family", 15, &error_abort);
|
|
|
|
object_property_set_int(OBJECT(cpu), "model", 107, &error_abort);
|
|
|
|
object_property_set_int(OBJECT(cpu), "stepping", 1, &error_abort);
|
|
|
|
#else
|
2021-03-22 14:27:40 +01:00
|
|
|
object_property_set_int(OBJECT(cpu), "family", 6, &error_abort);
|
|
|
|
object_property_set_int(OBJECT(cpu), "model", 6, &error_abort);
|
|
|
|
object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort);
|
2021-05-07 15:36:50 +02:00
|
|
|
#endif
|
2021-03-22 14:27:40 +01:00
|
|
|
object_property_set_str(OBJECT(cpu), "model-id",
|
|
|
|
"QEMU TCG CPU version " QEMU_HW_VERSION,
|
|
|
|
&error_abort);
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
|
|
|
|
2017-02-22 19:39:17 +01:00
|
|
|
static const TypeInfo max_x86_cpu_type_info = {
|
|
|
|
.name = X86_CPU_TYPE_NAME("max"),
|
|
|
|
.parent = TYPE_X86_CPU,
|
|
|
|
.instance_init = max_x86_cpu_initfn,
|
|
|
|
.class_init = max_x86_cpu_class_init,
|
|
|
|
};
|
|
|
|
|
2018-10-15 06:47:24 +02:00
|
|
|
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
|
|
|
|
{
|
|
|
|
assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
|
|
|
|
|
|
|
|
switch (f->type) {
|
|
|
|
case CPUID_FEATURE_WORD:
|
|
|
|
{
|
|
|
|
const char *reg = get_register_name_32(f->cpuid.reg);
|
|
|
|
assert(reg);
|
|
|
|
return g_strdup_printf("CPUID.%02XH:%s",
|
|
|
|
f->cpuid.eax, reg);
|
|
|
|
}
|
|
|
|
case MSR_FEATURE_WORD:
|
|
|
|
return g_strdup_printf("MSR(%02XH)",
|
|
|
|
f->msr.index);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-07-02 15:32:41 +02:00
|
|
|
static bool x86_cpu_have_filtered_features(X86CPU *cpu)
|
2010-03-11 14:38:55 +01:00
|
|
|
{
|
2019-07-02 15:32:41 +02:00
|
|
|
FeatureWord w;
|
|
|
|
|
|
|
|
for (w = 0; w < FEATURE_WORDS; w++) {
|
|
|
|
if (cpu->filtered_features[w]) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-07-01 17:38:54 +02:00
|
|
|
static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
|
2019-07-02 15:32:41 +02:00
|
|
|
const char *verbose_prefix)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
2014-04-30 18:48:31 +02:00
|
|
|
FeatureWordInfo *f = &feature_word_info[w];
|
2010-03-11 14:38:55 +01:00
|
|
|
int i;
|
|
|
|
|
2019-07-02 15:32:41 +02:00
|
|
|
if (!cpu->force_features) {
|
|
|
|
env->features[w] &= ~mask;
|
|
|
|
}
|
|
|
|
cpu->filtered_features[w] |= mask;
|
|
|
|
|
|
|
|
if (!verbose_prefix) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-07-01 17:38:54 +02:00
|
|
|
for (i = 0; i < 64; ++i) {
|
|
|
|
if ((1ULL << i) & mask) {
|
2019-10-25 04:56:32 +02:00
|
|
|
g_autofree char *feat_word_str = feature_word_description(f, i);
|
2019-07-02 15:32:41 +02:00
|
|
|
warn_report("%s: %s%s%s [bit %d]",
|
|
|
|
verbose_prefix,
|
2018-10-15 06:47:24 +02:00
|
|
|
feat_word_str,
|
2017-09-11 21:52:53 +02:00
|
|
|
f->feat_names[i] ? "." : "",
|
|
|
|
f->feat_names[i] ? f->feat_names[i] : "", i);
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
2014-04-30 18:48:29 +02:00
|
|
|
}
|
2010-03-11 14:38:55 +01:00
|
|
|
}
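
/*
 * Illustrative example of the warning emitted above (feature name and bit
 * position assumed for illustration): filtering the "hle" bit of
 * FEAT_7_0_EBX would print something like
 *     <verbose_prefix>: CPUID.07H:EBX.hle [bit 4]
 * where <verbose_prefix> is the string passed in by the caller.
 */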
|
|
|
|
|
qom: Swap 'name' next to visitor in ObjectPropertyAccessor
Similar to the previous patch, it's nice to have all functions
in the tree that involve a visitor and a name for conversion to
or from QAPI to consistently stick the 'name' parameter next
to the Visitor parameter.
Done by manually changing include/qom/object.h and qom/object.c,
then running this Coccinelle script and touching up the fallout
(Coccinelle insisted on adding some trailing whitespace).
@ rule1 @
identifier fn;
typedef Object, Visitor, Error;
identifier obj, v, opaque, name, errp;
@@
void fn
- (Object *obj, Visitor *v, void *opaque, const char *name,
+ (Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp) { ... }
@@
identifier rule1.fn;
expression obj, v, opaque, name, errp;
@@
fn(obj, v,
- opaque, name,
+ name, opaque,
errp)
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <1454075341-13658-20-git-send-email-eblake@redhat.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
2012-04-17 14:42:22 +02:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int64_t value;
|
|
|
|
|
|
|
|
value = (env->cpuid_version >> 8) & 0xf;
|
|
|
|
if (value == 0xf) {
|
|
|
|
value += (env->cpuid_version >> 20) & 0xff;
|
|
|
|
}
|
qapi: Swap visit_* arguments for consistent 'name' placement
JSON uses "name":value, but many of our visitor interfaces were
called with visit_type_FOO(v, &value, name, errp). This can be
a bit confusing to have to mentally swap the parameter order to
match JSON order. It's particularly bad for visit_start_struct(),
where the 'name' parameter is smack in the middle of the
otherwise-related group of 'obj, kind, size' parameters! It's
time to do a global swap of the parameter ordering, so that the
'name' parameter is always immediately after the Visitor argument.
Additional reason in favor of the swap: the existing include/qjson.h
prefers listing 'name' first in json_prop_*(), and I have plans to
unify that file with the qapi visitors; listing 'name' first in
qapi will minimize churn to the (admittedly few) qjson.h clients.
Later patches will then fix docs, object.h, visitor-impl.h, and
those clients to match.
Done by first patching scripts/qapi*.py by hand to make generated
files do what I want, then by running the following Coccinelle
script to affect the rest of the code base:
$ spatch --sp-file script `git grep -l '\bvisit_' -- '**/*.[ch]'`
I then had to apply some touchups (Coccinelle insisted on TAB
indentation in visitor.h, and botched the signature of
visit_type_enum() by rewriting 'const char *const strings[]' to
the syntactically invalid 'const char*const[] strings'). The
movement of parameters is sufficient to provoke compiler errors
if any callers were missed.
// Part 1: Swap declaration order
@@
type TV, TErr, TObj, T1, T2;
identifier OBJ, ARG1, ARG2;
@@
void visit_start_struct
-(TV v, TObj OBJ, T1 ARG1, const char *name, T2 ARG2, TErr errp)
+(TV v, const char *name, TObj OBJ, T1 ARG1, T2 ARG2, TErr errp)
{ ... }
@@
type bool, TV, T1;
identifier ARG1;
@@
bool visit_optional
-(TV v, T1 ARG1, const char *name)
+(TV v, const char *name, T1 ARG1)
{ ... }
@@
type TV, TErr, TObj, T1;
identifier OBJ, ARG1;
@@
void visit_get_next_type
-(TV v, TObj OBJ, T1 ARG1, const char *name, TErr errp)
+(TV v, const char *name, TObj OBJ, T1 ARG1, TErr errp)
{ ... }
@@
type TV, TErr, TObj, T1, T2;
identifier OBJ, ARG1, ARG2;
@@
void visit_type_enum
-(TV v, TObj OBJ, T1 ARG1, T2 ARG2, const char *name, TErr errp)
+(TV v, const char *name, TObj OBJ, T1 ARG1, T2 ARG2, TErr errp)
{ ... }
@@
type TV, TErr, TObj;
identifier OBJ;
identifier VISIT_TYPE =~ "^visit_type_";
@@
void VISIT_TYPE
-(TV v, TObj OBJ, const char *name, TErr errp)
+(TV v, const char *name, TObj OBJ, TErr errp)
{ ... }
// Part 2: swap caller order
@@
expression V, NAME, OBJ, ARG1, ARG2, ERR;
identifier VISIT_TYPE =~ "^visit_type_";
@@
(
-visit_start_struct(V, OBJ, ARG1, NAME, ARG2, ERR)
+visit_start_struct(V, NAME, OBJ, ARG1, ARG2, ERR)
|
-visit_optional(V, ARG1, NAME)
+visit_optional(V, NAME, ARG1)
|
-visit_get_next_type(V, OBJ, ARG1, NAME, ERR)
+visit_get_next_type(V, NAME, OBJ, ARG1, ERR)
|
-visit_type_enum(V, OBJ, ARG1, ARG2, NAME, ERR)
+visit_type_enum(V, NAME, OBJ, ARG1, ARG2, ERR)
|
-VISIT_TYPE(V, OBJ, NAME, ERR)
+VISIT_TYPE(V, NAME, OBJ, ERR)
)
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <1454075341-13658-19-git-send-email-eblake@redhat.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
2016-01-29 14:48:54 +01:00
|
|
|
visit_type_int(v, name, &value, errp);
|
2012-04-17 14:42:22 +02:00
|
|
|
}
|
|
|
|
|
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
2012-02-17 17:46:01 +01:00
|
|
|
{
|
2012-04-17 12:10:29 +02:00
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
const int64_t min = 0;
|
|
|
|
const int64_t max = 0xff + 0xf;
|
|
|
|
int64_t value;
|
|
|
|
|
error: Eliminate error_propagate() with Coccinelle, part 1
When all we do with an Error we receive into a local variable is
propagating to somewhere else, we can just as well receive it there
right away. Convert
if (!foo(..., &err)) {
...
error_propagate(errp, err);
...
return ...
}
to
if (!foo(..., errp)) {
...
...
return ...
}
where nothing else needs @err. Coccinelle script:
@rule1 forall@
identifier fun, err, errp, lbl;
expression list args, args2;
binary operator op;
constant c1, c2;
symbol false;
@@
if (
(
- fun(args, &err, args2)
+ fun(args, errp, args2)
|
- !fun(args, &err, args2)
+ !fun(args, errp, args2)
|
- fun(args, &err, args2) op c1
+ fun(args, errp, args2) op c1
)
)
{
... when != err
when != lbl:
when strict
- error_propagate(errp, err);
... when != err
(
return;
|
return c2;
|
return false;
)
}
@rule2 forall@
identifier fun, err, errp, lbl;
expression list args, args2;
expression var;
binary operator op;
constant c1, c2;
symbol false;
@@
- var = fun(args, &err, args2);
+ var = fun(args, errp, args2);
... when != err
if (
(
var
|
!var
|
var op c1
)
)
{
... when != err
when != lbl:
when strict
- error_propagate(errp, err);
... when != err
(
return;
|
return c2;
|
return false;
|
return var;
)
}
@depends on rule1 || rule2@
identifier err;
@@
- Error *err = NULL;
... when != err
Not exactly elegant, I'm afraid.
The "when != lbl:" is necessary to avoid transforming
if (fun(args, &err)) {
goto out
}
...
out:
error_propagate(errp, err);
even though other paths to label out still need the error_propagate().
For an actual example, see sclp_realize().
Without the "when strict", Coccinelle transforms vfio_msix_setup(),
incorrectly. I don't know what exactly "when strict" does, only that
it helps here.
The match of return is narrower than what I want, but I can't figure
out how to express "return where the operand doesn't use @err". For
an example where it's too narrow, see vfio_intx_enable().
Silently fails to convert hw/arm/armsse.c, because Coccinelle gets
confused by ARMSSE being used both as typedef and function-like macro
there. Converted manually.
Line breaks tidied up manually. One nested declaration of @local_err
deleted manually. Preexisting unwanted blank line dropped in
hw/riscv/sifive_e.c.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-35-armbru@redhat.com>
2020-07-07 18:06:02 +02:00
|
|
|
if (!visit_type_int(v, name, &value, errp)) {
|
2012-04-17 12:10:29 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (value < min || value > max) {
|
2015-03-17 11:54:50 +01:00
|
|
|
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
|
|
|
|
name ? name : "null", value, min, max);
|
2012-04-17 12:10:29 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-02-17 17:46:01 +01:00
|
|
|
env->cpuid_version &= ~0xff00f00;
|
2012-04-17 12:10:29 +02:00
|
|
|
if (value > 0x0f) {
|
|
|
|
env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
|
2012-02-17 17:46:01 +01:00
|
|
|
} else {
|
2012-04-17 12:10:29 +02:00
|
|
|
env->cpuid_version |= value << 8;
|
2012-02-17 17:46:01 +01:00
|
|
|
}
|
|
|
|
}
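
/*
 * Worked example for the family encoding above: family 21 (0x15, used by
 * the Opteron_G4/G5 definitions in this file) does not fit in the 4-bit
 * base-family field, so it is stored as base family 0xF (bits 11:8) plus
 * extended family 21 - 15 = 6 (bits 27:20).  The getter sees the 0xF
 * escape value and adds the extended family back to return 21.
 */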
|
|
|
|
|
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
2012-04-17 14:48:14 +02:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int64_t value;
|
|
|
|
|
|
|
|
value = (env->cpuid_version >> 4) & 0xf;
|
|
|
|
value |= ((env->cpuid_version >> 16) & 0xf) << 4;
|
2016-01-29 14:48:54 +01:00
|
|
|
visit_type_int(v, name, &value, errp);
|
2012-04-17 14:48:14 +02:00
|
|
|
}
|
|
|
|
|
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
2012-02-17 17:46:02 +01:00
|
|
|
{
|
2012-04-17 12:16:39 +02:00
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
const int64_t min = 0;
|
|
|
|
const int64_t max = 0xff;
|
|
|
|
int64_t value;
|
|
|
|
|
2020-07-07 18:06:02 +02:00
|
|
|
if (!visit_type_int(v, name, &value, errp)) {
|
2012-04-17 12:16:39 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (value < min || value > max) {
|
2015-03-17 11:54:50 +01:00
|
|
|
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
|
|
|
|
name ? name : "null", value, min, max);
|
2012-04-17 12:16:39 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-02-17 17:46:02 +01:00
|
|
|
env->cpuid_version &= ~0xf00f0;
|
2012-04-17 12:16:39 +02:00
|
|
|
env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
|
2012-02-17 17:46:02 +01:00
|
|
|
}
|
|
|
|
|
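The setter above packs the full model number into two CPUID[1].EAX bit fields: bits 4-7 hold the low nibble ("model") and bits 16-19 hold the high nibble ("extended model"). A minimal standalone sketch of the same bit manipulation, not taken from this file (the example value 0x3d is the Broadwell model number):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cpuid_version = 0;
    int64_t value = 0x3d;    /* full model number, range 0..0xff */

    /* Same bit manipulation as x86_cpuid_version_set_model() above. */
    cpuid_version &= ~0xf00f0;
    cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);

    printf("model=0x%x ext_model=0x%x\n",
           (unsigned)((cpuid_version >> 4) & 0xf),
           (unsigned)((cpuid_version >> 16) & 0xf));
    return 0;
}

For value 0x3d this prints model=0xd ext_model=0x3, which is what the getter pair reassembles.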
2012-04-17 14:50:53 +02:00
|
|
|
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
|
2016-01-29 14:48:55 +01:00
|
|
|
const char *name, void *opaque,
|
2012-04-17 14:50:53 +02:00
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int64_t value;
|
|
|
|
|
|
|
|
value = env->cpuid_version & 0xf;
|
2016-01-29 14:48:54 +01:00
|
|
|
visit_type_int(v, name, &value, errp);
|
2012-04-17 14:50:53 +02:00
|
|
|
}
|
|
|
|
|
2012-04-17 14:14:18 +02:00
|
|
|
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
|
2016-01-29 14:48:55 +01:00
|
|
|
const char *name, void *opaque,
|
2012-04-17 14:14:18 +02:00
|
|
|
Error **errp)
|
2012-02-17 17:46:03 +01:00
|
|
|
{
|
2012-04-17 14:14:18 +02:00
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
const int64_t min = 0;
|
|
|
|
const int64_t max = 0xf;
|
|
|
|
int64_t value;
|
|
|
|
|
2020-07-07 18:06:02 +02:00
|
|
|
if (!visit_type_int(v, name, &value, errp)) {
|
2012-04-17 14:14:18 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (value < min || value > max) {
|
2015-03-17 11:54:50 +01:00
|
|
|
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
|
|
|
|
name ? name : "null", value, min, max);
|
2012-04-17 14:14:18 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-02-17 17:46:03 +01:00
|
|
|
env->cpuid_version &= ~0xf;
|
2012-04-17 14:14:18 +02:00
|
|
|
env->cpuid_version |= value & 0xf;
|
2012-02-17 17:46:03 +01:00
|
|
|
}
|
|
|
|
|
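The "if (!visit_type_int(v, name, &value, errp))" shape used in the setters above comes from the error_propagate() elimination described in the commit message quoted earlier. A self-contained illustration of the before/after pattern, with a hypothetical parse_positive() helper and a toy Error type standing in for the QEMU APIs:

#include <stdbool.h>
#include <stdio.h>

typedef struct Error { const char *msg; } Error;

static Error demo_error = { "value must be positive" };

/* Hypothetical callee playing the role of visit_type_int(). */
static bool parse_positive(int val, int *out, Error **errp)
{
    if (val <= 0) {
        *errp = &demo_error;
        return false;
    }
    *out = val;
    return true;
}

/* Before: a local Error received only to be propagated. */
static bool set_old(int val, int *out, Error **errp)
{
    Error *err = NULL;

    if (!parse_positive(val, out, &err)) {
        *errp = err;    /* stand-in for error_propagate(errp, err) */
        return false;
    }
    return true;
}

/* After the conversion: the caller's errp is passed straight through. */
static bool set_new(int val, int *out, Error **errp)
{
    if (!parse_positive(val, out, errp)) {
        return false;
    }
    return true;
}

int main(void)
{
    Error *err = NULL;
    int out;

    printf("old: %d, new: %d\n", set_old(5, &out, &err), set_new(-1, &out, &err));
    if (err) {
        printf("error: %s\n", err->msg);
    }
    return 0;
}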
2012-04-17 19:22:58 +02:00
|
|
|
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
char *value;
|
|
|
|
|
2014-12-04 14:46:46 +01:00
|
|
|
value = g_malloc(CPUID_VENDOR_SZ + 1);
|
2013-01-21 15:06:36 +01:00
|
|
|
x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
|
|
|
|
env->cpuid_vendor3);
|
2012-04-17 19:22:58 +02:00
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void x86_cpuid_set_vendor(Object *obj, const char *value,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int i;
|
|
|
|
|
2012-10-22 17:03:10 +02:00
|
|
|
if (strlen(value) != CPUID_VENDOR_SZ) {
|
2015-03-17 11:54:50 +01:00
|
|
|
error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
|
2012-04-17 19:22:58 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
env->cpuid_vendor1 = 0;
|
|
|
|
env->cpuid_vendor2 = 0;
|
|
|
|
env->cpuid_vendor3 = 0;
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
|
|
|
|
env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
|
|
|
|
env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
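For reference, a standalone sketch (not part of this file) of the byte packing done by the setter above: a 12-character vendor string is stored little-endian in three 32-bit words, matching the EBX/EDX/ECX layout that CPUID leaf 0 reports for the vendor signature.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *value = "GenuineIntel";    /* must be exactly 12 bytes */
    uint32_t vendor1 = 0, vendor2 = 0, vendor3 = 0;
    int i;

    if (strlen(value) != 12) {    /* CPUID_VENDOR_SZ */
        return 1;
    }
    for (i = 0; i < 4; i++) {
        vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
    /* Prints 756e6547 49656e69 6c65746e, i.e. "Genu" "ineI" "ntel". */
    printf("%08x %08x %08x\n", vendor1, vendor2, vendor3);
    return 0;
}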
2012-04-17 23:02:26 +02:00
|
|
|
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
char *value;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
value = g_malloc(48 + 1);
|
|
|
|
for (i = 0; i < 48; i++) {
|
|
|
|
value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
|
|
|
|
}
|
|
|
|
value[48] = '\0';
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
2012-04-17 15:17:27 +02:00
|
|
|
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
|
|
|
|
Error **errp)
|
2012-02-17 17:46:04 +01:00
|
|
|
{
|
2012-04-17 15:17:27 +02:00
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
CPUX86State *env = &cpu->env;
|
2012-02-17 17:46:04 +01:00
|
|
|
int c, len, i;
|
|
|
|
|
|
|
|
if (model_id == NULL) {
|
|
|
|
model_id = "";
|
|
|
|
}
|
|
|
|
len = strlen(model_id);
|
2012-04-17 18:21:52 +02:00
|
|
|
memset(env->cpuid_model, 0, 48);
|
2012-02-17 17:46:04 +01:00
|
|
|
for (i = 0; i < 48; i++) {
|
|
|
|
if (i >= len) {
|
|
|
|
c = '\0';
|
|
|
|
} else {
|
|
|
|
c = (uint8_t)model_id[i];
|
|
|
|
}
|
|
|
|
env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
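A standalone round-trip sketch (example string assumed, not QEMU code): the model-id is padded or truncated to exactly 48 bytes and stored byte-wise in twelve 32-bit words by the setter above; reading the bytes back the way the getter does recovers the original string.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t cpuid_model[12] = { 0 };
    const char *model_id = "Example Virtual CPU";    /* arbitrary example */
    int len = strlen(model_id);
    char out[49];
    int i, c;

    /* Pack, as in x86_cpuid_set_model_id() above: pad with NUL to 48 bytes. */
    for (i = 0; i < 48; i++) {
        c = (i >= len) ? '\0' : (uint8_t)model_id[i];
        cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
    /* Unpack, as the getter does. */
    for (i = 0; i < 48; i++) {
        out[i] = cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    out[48] = '\0';
    printf("%s\n", out);    /* prints the original string again */
    return 0;
}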
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
|
|
|
|
void *opaque, Error **errp)
|
2012-04-18 00:12:23 +02:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
int64_t value;
|
|
|
|
|
|
|
|
value = cpu->env.tsc_khz * 1000;
|
2016-01-29 14:48:54 +01:00
|
|
|
visit_type_int(v, name, &value, errp);
|
2012-04-18 00:12:23 +02:00
|
|
|
}
|
|
|
|
|
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
|
|
|
|
void *opaque, Error **errp)
|
2012-04-18 00:12:23 +02:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
|
|
|
const int64_t min = 0;
|
2012-09-22 02:13:13 +02:00
|
|
|
const int64_t max = INT64_MAX;
|
2012-04-18 00:12:23 +02:00
|
|
|
int64_t value;
|
|
|
|
|
2020-07-07 18:06:02 +02:00
|
|
|
if (!visit_type_int(v, name, &value, errp)) {
|
2012-04-18 00:12:23 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (value < min || value > max) {
|
2015-03-17 11:54:50 +01:00
|
|
|
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
|
|
|
|
name ? name : "null", value, min, max);
|
2012-04-18 00:12:23 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-11-24 04:33:57 +01:00
|
|
|
cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
|
2012-04-18 00:12:23 +02:00
|
|
|
}
|
|
|
|
|
2013-05-06 18:20:09 +02:00
|
|
|
/* Generic getter for "feature-words" and "filtered-features" properties */
|
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
2013-05-06 18:20:07 +02:00
|
|
|
{
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t *array = (uint64_t *)opaque;
|
2013-05-06 18:20:07 +02:00
|
|
|
FeatureWord w;
|
|
|
|
X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
|
|
|
|
X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
|
|
|
|
X86CPUFeatureWordInfoList *list = NULL;
|
|
|
|
|
|
|
|
for (w = 0; w < FEATURE_WORDS; w++) {
|
|
|
|
FeatureWordInfo *wi = &feature_word_info[w];
|
2018-10-15 06:47:24 +02:00
|
|
|
/*
|
|
|
|
* We didn't have MSR features when "feature-words" was
|
|
|
|
* introduced, so entries of any other type are skipped.
|
|
|
|
*/
|
|
|
|
if (wi->type != CPUID_FEATURE_WORD) {
|
|
|
|
continue;
|
|
|
|
}
|
2013-05-06 18:20:07 +02:00
|
|
|
X86CPUFeatureWordInfo *qwi = &word_infos[w];
|
2018-10-15 06:47:24 +02:00
|
|
|
qwi->cpuid_input_eax = wi->cpuid.eax;
|
|
|
|
qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
|
|
|
|
qwi->cpuid_input_ecx = wi->cpuid.ecx;
|
|
|
|
qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
|
2013-05-06 18:20:09 +02:00
|
|
|
qwi->features = array[w];
|
2013-05-06 18:20:07 +02:00
|
|
|
|
|
|
|
/* List will be in reverse order, but order shouldn't matter */
|
|
|
|
list_entries[w].next = list;
|
|
|
|
list_entries[w].value = &word_infos[w];
|
|
|
|
list = &list_entries[w];
|
|
|
|
}
|
|
|
|
|
2016-06-13 23:57:57 +02:00
|
|
|
visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
|
2013-05-06 18:20:07 +02:00
|
|
|
}
|
|
|
|
|
2013-04-26 18:04:32 +02:00
|
|
|
/* Convert all '_' in a feature string option name to '-', to make feature
|
|
|
|
* names conform to the QOM property naming rule, which uses '-' instead of '_'.
|
|
|
|
*/
|
|
|
|
static inline void feat2prop(char *s)
|
|
|
|
{
|
|
|
|
while ((s = strchr(s, '_'))) {
|
|
|
|
*s = '-';
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
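A trivial standalone usage sketch; feat2prop() is copied here only so the example compiles on its own, and "lahf_lm" is just an illustrative feature name:

#include <stdio.h>
#include <string.h>

static void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

int main(void)
{
    char name[] = "lahf_lm";
    feat2prop(name);
    printf("%s\n", name);    /* prints "lahf-lm" */
    return 0;
}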
2016-05-03 18:48:13 +02:00
|
|
|
/* Return the feature property name for a feature flag bit */
|
|
|
|
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
|
|
|
|
{
|
2019-07-01 17:38:54 +02:00
|
|
|
const char *name;
|
2016-05-03 18:48:13 +02:00
|
|
|
/* XSAVE components are automatically enabled by other features,
|
|
|
|
* so return the original feature name instead
|
|
|
|
*/
|
2022-02-15 20:52:54 +01:00
|
|
|
if (w == FEAT_XSAVE_XCR0_LO || w == FEAT_XSAVE_XCR0_HI) {
|
|
|
|
int comp = (w == FEAT_XSAVE_XCR0_HI) ? bitnr + 32 : bitnr;
|
2016-05-03 18:48:13 +02:00
|
|
|
|
|
|
|
if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
|
|
|
|
x86_ext_save_areas[comp].bits) {
|
|
|
|
w = x86_ext_save_areas[comp].feature;
|
|
|
|
bitnr = ctz32(x86_ext_save_areas[comp].bits);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-01 17:38:54 +02:00
|
|
|
assert(bitnr < 64);
|
2016-05-03 18:48:13 +02:00
|
|
|
assert(w < FEATURE_WORDS);
|
2019-07-01 17:38:54 +02:00
|
|
|
name = feature_word_info[w].feat_names[bitnr];
|
|
|
|
assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
|
|
|
|
return name;
|
2016-05-03 18:48:13 +02:00
|
|
|
}
|
|
|
|
|
2016-06-06 17:16:44 +02:00
|
|
|
/* Compatibility hack to maintain legacy +-feat semantics,
|
|
|
|
* where +-feat overwrites any feature set by
|
|
|
|
* feat=on|feat even if the latter is parsed after +-feat
|
|
|
|
* (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
|
|
|
|
*/
|
2016-09-30 20:49:39 +02:00
|
|
|
static GList *plus_features, *minus_features;
|
2016-06-06 17:16:44 +02:00
|
|
|
|
2016-10-24 21:53:52 +02:00
|
|
|
static gint compare_string(gconstpointer a, gconstpointer b)
|
|
|
|
{
|
|
|
|
return g_strcmp0(a, b);
|
|
|
|
}
|
|
|
|
|
2012-12-04 20:34:39 +01:00
|
|
|
/* Parse "+feature,-feature,feature=foo" CPU feature string
|
|
|
|
*/
|
2016-06-09 19:11:01 +02:00
|
|
|
static void x86_cpu_parse_featurestr(const char *typename, char *features,
|
2014-03-03 23:19:19 +01:00
|
|
|
Error **errp)
|
2012-12-04 20:34:39 +01:00
|
|
|
{
|
|
|
|
char *featurestr; /* Single 'key=value" string being parsed */
|
2016-06-09 19:11:01 +02:00
|
|
|
static bool cpu_globals_initialized;
|
2016-10-24 21:53:52 +02:00
|
|
|
bool ambiguous = false;
|
2016-06-09 19:11:01 +02:00
|
|
|
|
|
|
|
if (cpu_globals_initialized) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
cpu_globals_initialized = true;
|
2012-12-04 20:34:39 +01:00
|
|
|
|
2016-06-09 19:10:58 +02:00
|
|
|
if (!features) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (featurestr = strtok(features, ",");
|
2016-11-25 16:21:53 +01:00
|
|
|
featurestr;
|
2016-06-09 19:10:58 +02:00
|
|
|
featurestr = strtok(NULL, ",")) {
|
|
|
|
const char *name;
|
|
|
|
const char *val = NULL;
|
|
|
|
char *eq = NULL;
|
2016-06-21 14:04:40 +02:00
|
|
|
char num[32];
|
2016-06-09 19:11:01 +02:00
|
|
|
GlobalProperty *prop;
|
2010-03-11 14:38:55 +01:00
|
|
|
|
2016-06-09 19:10:58 +02:00
|
|
|
/* Compatibility syntax: */
|
2010-03-11 14:38:55 +01:00
|
|
|
if (featurestr[0] == '+') {
|
2016-09-30 20:49:39 +02:00
|
|
|
plus_features = g_list_append(plus_features,
|
|
|
|
g_strdup(featurestr + 1));
|
2016-06-09 19:10:58 +02:00
|
|
|
continue;
|
2010-03-11 14:38:55 +01:00
|
|
|
} else if (featurestr[0] == '-') {
|
2016-09-30 20:49:39 +02:00
|
|
|
minus_features = g_list_append(minus_features,
|
|
|
|
g_strdup(featurestr + 1));
|
2016-06-09 19:10:58 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
eq = strchr(featurestr, '=');
|
|
|
|
if (eq) {
|
|
|
|
*eq++ = 0;
|
|
|
|
val = eq;
|
2010-03-11 14:38:55 +01:00
|
|
|
} else {
|
2016-06-09 19:10:58 +02:00
|
|
|
val = "on";
|
2013-01-21 15:06:38 +01:00
|
|
|
}
|
2016-06-09 19:10:58 +02:00
|
|
|
|
|
|
|
feat2prop(featurestr);
|
|
|
|
name = featurestr;
|
|
|
|
|
2016-10-24 21:53:52 +02:00
|
|
|
if (g_list_find_custom(plus_features, name, compare_string)) {
|
2017-07-12 15:57:41 +02:00
|
|
|
warn_report("Ambiguous CPU model string. "
|
|
|
|
"Don't mix both \"+%s\" and \"%s=%s\"",
|
|
|
|
name, name, val);
|
2016-10-24 21:53:52 +02:00
|
|
|
ambiguous = true;
|
|
|
|
}
|
|
|
|
if (g_list_find_custom(minus_features, name, compare_string)) {
|
2017-07-12 15:57:41 +02:00
|
|
|
warn_report("Ambiguous CPU model string. "
|
|
|
|
"Don't mix both \"-%s\" and \"%s=%s\"",
|
|
|
|
name, name, val);
|
2016-10-24 21:53:52 +02:00
|
|
|
ambiguous = true;
|
|
|
|
}
|
|
|
|
|
2016-06-09 19:10:58 +02:00
|
|
|
/* Special case: */
|
|
|
|
if (!strcmp(name, "tsc-freq")) {
|
2017-02-21 21:14:06 +01:00
|
|
|
int ret;
|
2017-02-21 21:14:07 +01:00
|
|
|
uint64_t tsc_freq;
|
2016-06-09 19:10:58 +02:00
|
|
|
|
2017-02-21 21:14:06 +01:00
|
|
|
ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
|
2017-02-21 21:14:07 +01:00
|
|
|
if (ret < 0 || tsc_freq > INT64_MAX) {
|
2016-06-09 19:10:58 +02:00
|
|
|
error_setg(errp, "bad numerical value %s", val);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
|
|
|
|
val = num;
|
|
|
|
name = "tsc-frequency";
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
2016-06-09 19:10:58 +02:00
|
|
|
|
2016-06-09 19:11:01 +02:00
|
|
|
prop = g_new0(typeof(*prop), 1);
|
|
|
|
prop->driver = typename;
|
|
|
|
prop->property = g_strdup(name);
|
|
|
|
prop->value = g_strdup(val);
|
|
|
|
qdev_prop_register_global(prop);
|
2016-06-09 19:10:58 +02:00
|
|
|
}
|
|
|
|
|
2016-10-24 21:53:52 +02:00
|
|
|
if (ambiguous) {
|
2017-07-12 15:57:41 +02:00
|
|
|
warn_report("Compatibility of ambiguous CPU model "
|
|
|
|
"strings won't be kept on future QEMU versions");
|
2016-10-24 21:53:52 +02:00
|
|
|
}
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
|
|
|
|
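A standalone sketch of the tokenizing rules implemented above, using a made-up feature string and no QEMU globals: "+feat" and "-feat" are recorded as legacy requests, while every other entry becomes a key=value pair, with a bare name defaulting to "on".

#include <stdio.h>
#include <string.h>

int main(void)
{
    char features[] = "+avx2,-x2apic,model-id=Example,pmu";
    char *featurestr;

    for (featurestr = strtok(features, ","); featurestr;
         featurestr = strtok(NULL, ",")) {
        char *eq;
        const char *val = "on";

        if (featurestr[0] == '+' || featurestr[0] == '-') {
            printf("legacy request: %s\n", featurestr);
            continue;
        }
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        }
        printf("global property: %s=%s\n", featurestr, val);
    }
    return 0;
}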
2019-07-02 15:32:41 +02:00
|
|
|
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
|
2016-05-03 18:48:13 +02:00
|
|
|
|
2019-04-23 01:47:41 +02:00
|
|
|
/* Build a list with the name of all features on a feature word array */
|
|
|
|
static void x86_cpu_list_feature_names(FeatureWordArray features,
|
2021-01-13 23:10:12 +01:00
|
|
|
strList **list)
|
2019-04-23 01:47:41 +02:00
|
|
|
{
|
2021-01-13 23:10:12 +01:00
|
|
|
strList **tail = list;
|
2019-04-23 01:47:41 +02:00
|
|
|
FeatureWord w;
|
|
|
|
|
|
|
|
for (w = 0; w < FEATURE_WORDS; w++) {
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t filtered = features[w];
|
2019-04-23 01:47:41 +02:00
|
|
|
int i;
|
2019-07-01 17:38:54 +02:00
|
|
|
for (i = 0; i < 64; i++) {
|
|
|
|
if (filtered & (1ULL << i)) {
|
2021-01-13 23:10:12 +01:00
|
|
|
QAPI_LIST_APPEND(tail, g_strdup(x86_cpu_feature_name(w, i)));
|
2019-04-23 01:47:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-23 01:47:42 +02:00
|
|
|
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
X86CPU *xc = X86_CPU(obj);
|
|
|
|
strList *result = NULL;
|
|
|
|
|
|
|
|
x86_cpu_list_feature_names(xc->filtered_features, &result);
|
|
|
|
visit_type_strList(v, "unavailable-features", &result, errp);
|
|
|
|
}
|
|
|
|
|
2016-05-03 18:48:13 +02:00
|
|
|
/* Check for missing features that may prevent the CPU class from
|
|
|
|
* running using the current machine and accelerator.
|
|
|
|
*/
|
|
|
|
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
|
2021-01-13 23:10:12 +01:00
|
|
|
strList **list)
|
2016-05-03 18:48:13 +02:00
|
|
|
{
|
2021-01-13 23:10:12 +01:00
|
|
|
strList **tail = list;
|
2016-05-03 18:48:13 +02:00
|
|
|
X86CPU *xc;
|
|
|
|
Error *err = NULL;
|
|
|
|
|
2017-09-13 11:05:19 +02:00
|
|
|
if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
|
2021-01-13 23:10:12 +01:00
|
|
|
QAPI_LIST_APPEND(tail, g_strdup("kvm"));
|
2016-05-03 18:48:13 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-11-13 13:57:55 +01:00
|
|
|
xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
|
2016-05-03 18:48:13 +02:00
|
|
|
|
2017-01-16 22:11:21 +01:00
|
|
|
x86_cpu_expand_features(xc, &err);
|
2016-05-03 18:48:13 +02:00
|
|
|
if (err) {
|
2017-01-16 22:11:21 +01:00
|
|
|
/* Errors at x86_cpu_expand_features should never happen,
|
2016-05-03 18:48:13 +02:00
|
|
|
* but in case they do, just report the model as not
|
|
|
|
* runnable at all using the "type" property.
|
|
|
|
*/
|
2021-01-13 23:10:12 +01:00
|
|
|
QAPI_LIST_APPEND(tail, g_strdup("type"));
|
2020-08-31 15:43:11 +02:00
|
|
|
error_free(err);
|
2016-05-03 18:48:13 +02:00
|
|
|
}
|
|
|
|
|
2019-07-02 15:32:41 +02:00
|
|
|
x86_cpu_filter_features(xc, false);
|
2016-05-03 18:48:13 +02:00
|
|
|
|
2021-01-13 23:10:12 +01:00
|
|
|
x86_cpu_list_feature_names(xc->filtered_features, tail);
|
2016-05-03 18:48:13 +02:00
|
|
|
|
|
|
|
object_unref(OBJECT(xc));
|
|
|
|
}
|
|
|
|
|
2015-02-03 18:48:55 +01:00
|
|
|
/* Print all cpuid feature names in the given feature list
|
2010-03-11 14:38:55 +01:00
|
|
|
*/
|
2019-04-17 21:17:57 +02:00
|
|
|
static void listflags(GList *features)
|
2015-03-03 01:29:17 +01:00
|
|
|
{
|
2018-06-06 18:55:27 +02:00
|
|
|
size_t len = 0;
|
|
|
|
GList *tmp;
|
|
|
|
|
|
|
|
for (tmp = features; tmp; tmp = tmp->next) {
|
|
|
|
const char *name = tmp->data;
|
|
|
|
if ((len + strlen(name) + 1) >= 75) {
|
2019-04-17 21:17:57 +02:00
|
|
|
qemu_printf("\n");
|
2018-06-06 18:55:27 +02:00
|
|
|
len = 0;
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
2019-04-17 21:17:57 +02:00
|
|
|
qemu_printf("%s%s", len == 0 ? " " : " ", name);
|
2018-06-06 18:55:27 +02:00
|
|
|
len += strlen(name) + 1;
|
2015-02-03 18:48:55 +01:00
|
|
|
}
|
2019-04-17 21:17:57 +02:00
|
|
|
qemu_printf("\n");
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
|
|
|
|
2017-01-19 22:04:45 +01:00
|
|
|
/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
|
2016-09-30 20:49:36 +02:00
|
|
|
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
|
|
|
|
{
|
|
|
|
ObjectClass *class_a = (ObjectClass *)a;
|
|
|
|
ObjectClass *class_b = (ObjectClass *)b;
|
|
|
|
X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
|
|
|
|
X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
|
i386: improve sorting of CPU model names
The current list of CPU model names output by "-cpu help" is sorted
alphabetically based on the internal QOM class name. The text that is
displayed, however, uses the CPU model name, which is equivalent to the
QOM class name, minus a suffix. Unfortunately that suffix has an effect
on the sort ordering, for example, causing the various Broadwell
variants to appear reversed:
x86 486
x86 Broadwell-IBRS Intel Core Processor (Broadwell, IBRS)
x86 Broadwell-noTSX-IBRS Intel Core Processor (Broadwell, no TSX, IBRS
x86 Broadwell-noTSX Intel Core Processor (Broadwell, no TSX)
x86 Broadwell Intel Core Processor (Broadwell)
x86 Conroe Intel Celeron_4x0 (Conroe/Merom Class Core 2)
By sorting on the actual CPU model name text that is displayed, the
result is
x86 486
x86 Broadwell Intel Core Processor (Broadwell)
x86 Broadwell-IBRS Intel Core Processor (Broadwell, IBRS)
x86 Broadwell-noTSX Intel Core Processor (Broadwell, no TSX)
x86 Broadwell-noTSX-IBRS Intel Core Processor (Broadwell, no TSX, IBRS)
x86 Conroe Intel Celeron_4x0 (Conroe/Merom Class Core 2)
This requires extra string allocations during sorting, but this is not a
concern given the usage scenario and the number of CPU models that exist.
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20180606165527.17365-3-berrange@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-06-06 18:55:26 +02:00
|
|
|
int ret;
|
2016-09-30 20:49:36 +02:00
|
|
|
|
2017-01-19 22:04:45 +01:00
|
|
|
if (cc_a->ordering != cc_b->ordering) {
|
2018-06-06 18:55:26 +02:00
|
|
|
ret = cc_a->ordering - cc_b->ordering;
|
2016-09-30 20:49:36 +02:00
|
|
|
} else {
|
2019-10-25 04:56:32 +02:00
|
|
|
g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
|
|
|
|
g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
|
2018-06-06 18:55:26 +02:00
|
|
|
ret = strcmp(name_a, name_b);
|
2016-09-30 20:49:36 +02:00
|
|
|
}
|
2018-06-06 18:55:26 +02:00
|
|
|
return ret;
|
2016-09-30 20:49:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static GSList *get_sorted_cpu_model_list(void)
|
|
|
|
{
|
|
|
|
GSList *list = object_class_get_list(TYPE_X86_CPU, false);
|
|
|
|
list = g_slist_sort(list, x86_cpu_list_compare);
|
|
|
|
return list;
|
|
|
|
}
|
|
|
|
|
2019-06-28 02:28:38 +02:00
|
|
|
static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
|
|
|
|
{
|
2019-11-13 13:57:55 +01:00
|
|
|
Object *obj = object_new_with_class(OBJECT_CLASS(xc));
|
2019-06-28 02:28:38 +02:00
|
|
|
char *r = object_property_get_str(obj, "model-id", &error_abort);
|
|
|
|
object_unref(obj);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2019-06-28 02:28:42 +02:00
|
|
|
static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
|
|
|
|
{
|
|
|
|
X86CPUVersion version;
|
|
|
|
|
|
|
|
if (!cc->model || !cc->model->is_alias) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
version = x86_cpu_model_resolve_version(cc->model);
|
|
|
|
if (version <= 0) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return x86_cpu_versioned_model_name(cc->model->cpudef, version);
|
|
|
|
}
|
|
|
|
|
2016-09-30 20:49:36 +02:00
|
|
|
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
|
|
|
|
{
|
|
|
|
ObjectClass *oc = data;
|
|
|
|
X86CPUClass *cc = X86_CPU_CLASS(oc);
|
2019-10-25 04:56:32 +02:00
|
|
|
g_autofree char *name = x86_cpu_class_get_model_name(cc);
|
|
|
|
g_autofree char *desc = g_strdup(cc->model_description);
|
|
|
|
g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
|
2020-02-12 09:13:27 +01:00
|
|
|
g_autofree char *model_id = x86_cpu_class_get_model_id(cc);
|
2019-06-28 02:28:38 +02:00
|
|
|
|
2019-06-28 02:28:42 +02:00
|
|
|
if (!desc && alias_of) {
|
|
|
|
if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
|
|
|
|
desc = g_strdup("(alias configured by machine type)");
|
|
|
|
} else {
|
|
|
|
desc = g_strdup_printf("(alias of %s)", alias_of);
|
|
|
|
}
|
|
|
|
}
|
2020-02-12 09:13:27 +01:00
|
|
|
if (!desc && cc->model && cc->model->note) {
|
|
|
|
desc = g_strdup_printf("%s [%s]", model_id, cc->model->note);
|
|
|
|
}
|
2019-06-28 02:28:38 +02:00
|
|
|
if (!desc) {
|
2020-02-12 09:13:27 +01:00
|
|
|
desc = g_strdup_printf("%s", model_id);
|
2016-09-30 20:49:36 +02:00
|
|
|
}
|
|
|
|
|
2021-10-09 17:24:01 +02:00
|
|
|
qemu_printf("x86 %-20s %s\n", name, desc);
|
2016-09-30 20:49:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* list available CPU models and flags */
|
2019-04-17 21:17:57 +02:00
|
|
|
void x86_cpu_list(void)
|
2010-03-11 14:38:55 +01:00
|
|
|
{
|
2018-06-06 18:55:27 +02:00
|
|
|
int i, j;
|
2016-09-30 20:49:36 +02:00
|
|
|
GSList *list;
|
2018-06-06 18:55:27 +02:00
|
|
|
GList *names = NULL;
|
2010-03-11 14:38:55 +01:00
|
|
|
|
2019-04-17 21:17:57 +02:00
|
|
|
qemu_printf("Available CPUs:\n");
|
2016-09-30 20:49:36 +02:00
|
|
|
list = get_sorted_cpu_model_list();
|
2019-04-17 21:17:57 +02:00
|
|
|
g_slist_foreach(list, x86_cpu_list_entry, NULL);
|
2016-09-30 20:49:36 +02:00
|
|
|
g_slist_free(list);
|
2013-03-24 17:01:02 +01:00
|
|
|
|
2018-06-06 18:55:27 +02:00
|
|
|
names = NULL;
|
2013-02-27 10:15:51 +01:00
|
|
|
for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
|
|
|
|
FeatureWordInfo *fw = &feature_word_info[i];
|
2019-07-01 17:38:54 +02:00
|
|
|
for (j = 0; j < 64; j++) {
|
2018-06-06 18:55:27 +02:00
|
|
|
if (fw->feat_names[j]) {
|
|
|
|
names = g_list_append(names, (gpointer)fw->feat_names[j]);
|
|
|
|
}
|
|
|
|
}
|
2013-02-27 10:15:51 +01:00
|
|
|
}
|
2018-06-06 18:55:27 +02:00
|
|
|
|
|
|
|
names = g_list_sort(names, (GCompareFunc)strcmp);
|
|
|
|
|
2019-04-17 21:17:57 +02:00
|
|
|
qemu_printf("\nRecognized CPUID flags:\n");
|
|
|
|
listflags(names);
|
|
|
|
qemu_printf("\n");
|
2018-06-06 18:55:27 +02:00
|
|
|
g_list_free(names);
|
2010-03-11 14:38:55 +01:00
|
|
|
}
|
|
|
|
|
2016-09-30 20:49:36 +02:00
|
|
|
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
|
|
|
|
{
|
|
|
|
ObjectClass *oc = data;
|
|
|
|
X86CPUClass *cc = X86_CPU_CLASS(oc);
|
|
|
|
CpuDefinitionInfoList **cpu_list = user_data;
|
|
|
|
CpuDefinitionInfo *info;
|
|
|
|
|
|
|
|
info = g_malloc0(sizeof(*info));
|
|
|
|
info->name = x86_cpu_class_get_model_name(cc);
|
2016-05-03 18:48:13 +02:00
|
|
|
x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
|
|
|
|
info->has_unavailable_features = true;
|
2016-11-16 19:21:39 +01:00
|
|
|
info->q_typename = g_strdup(object_class_get_name(oc));
|
2017-01-16 19:12:12 +01:00
|
|
|
info->migration_safe = cc->migration_safe;
|
|
|
|
info->has_migration_safe = true;
|
i386: Define static "base" CPU model
The query-cpu-model-expand QMP command needs at least one static
model, to allow the "static" expansion mode to be implemented.
Instead of defining static versions of every CPU model, define a
"base" CPU model that has absolutely no feature flag enabled.
Despite having no CPUID data set at all, "-cpu base" is even a
functional CPU:
* It can boot a Slackware Linux 1.01 image with a Linux 0.99.12
kernel[1].
* It is even possible to boot[2] a modern Fedora x86_64 guest by
manually enabling the following CPU features:
-cpu base,+lm,+msr,+pae,+fpu,+cx8,+cmov,+sse,+sse2,+fxsr
[1] http://www.qemu-advent-calendar.org/2014/#day-1
[2] This is what can be seen in the guest:
[root@localhost ~]# cat /proc/cpuinfo
processor : 0
vendor_id : unknown
cpu family : 0
model : 0
model name : 00/00
stepping : 0
physical id : 0
siblings : 1
core id : 0
cpu cores : 1
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 1
wp : yes
flags : fpu msr pae cx8 cmov fxsr sse sse2 lm nopl
bugs :
bogomips : 5832.70
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
[root@localhost ~]# x86info -v -a
x86info v1.30. Dave Jones 2001-2011
Feedback to <davej@redhat.com>.
No TSC, MHz calculation cannot be performed.
Unknown vendor (0)
MP Table:
Family: 0 Model: 0 Stepping: 0
CPU Model (x86info's best guess):
eax in: 0x00000000, eax = 00000001 ebx = 00000000 ecx = 00000000 edx = 00000000
eax in: 0x00000001, eax = 00000000 ebx = 00000800 ecx = 00000000 edx = 07008161
eax in: 0x80000000, eax = 80000001 ebx = 00000000 ecx = 00000000 edx = 00000000
eax in: 0x80000001, eax = 00000000 ebx = 00000000 ecx = 00000000 edx = 20000000
Feature flags:
fpu Onboard FPU
msr Model-Specific Registers
pae Physical Address Extensions
cx8 CMPXCHG8 instruction
cmov CMOV instruction
fxsr FXSAVE and FXRSTOR instructions
sse SSE support
sse2 SSE2 support
Long NOPs supported: yes
Address sizes : 0 bits physical, 0 bits virtual
0MHz processor (estimate).
running at an estimated 0MHz
[root@localhost ~]#
Message-Id: <20170222190029.17243-2-ehabkost@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Tested-by: Jiri Denemark <jdenemar@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2017-02-22 20:00:27 +01:00
|
|
|
info->q_static = cc->static_model;
|
2020-09-22 09:14:14 +02:00
|
|
|
if (cc->model && cc->model->cpudef->deprecation_note) {
|
|
|
|
info->deprecated = true;
|
|
|
|
} else {
|
|
|
|
info->deprecated = false;
|
|
|
|
}
|
2019-06-28 02:28:42 +02:00
|
|
|
/*
|
|
|
|
* Old machine types won't report aliases, so that alias translation
|
|
|
|
* doesn't break compatibility with previous QEMU versions.
|
|
|
|
*/
|
|
|
|
if (default_cpu_version != CPU_VERSION_LEGACY) {
|
|
|
|
info->alias_of = x86_cpu_class_get_alias_of(cc);
|
|
|
|
info->has_alias_of = !!info->alias_of;
|
|
|
|
}
|
2016-09-30 20:49:36 +02:00
|
|
|
|
2020-11-13 02:13:37 +01:00
|
|
|
QAPI_LIST_PREPEND(*cpu_list, info);
|
2016-09-30 20:49:36 +02:00
|
|
|
}
|
|
|
|
|
2019-02-14 16:22:47 +01:00
|
|
|
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
|
2012-08-10 18:04:14 +02:00
|
|
|
{
|
|
|
|
CpuDefinitionInfoList *cpu_list = NULL;
|
2016-09-30 20:49:36 +02:00
|
|
|
GSList *list = get_sorted_cpu_model_list();
|
|
|
|
g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
|
|
|
|
g_slist_free(list);
|
2012-08-10 18:04:14 +02:00
|
|
|
return cpu_list;
|
|
|
|
}
|
|
|
|
|
2022-03-23 12:33:25 +01:00
|
|
|
uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
|
|
|
bool migratable_only)
|
2014-04-30 18:48:32 +02:00
|
|
|
{
|
|
|
|
FeatureWordInfo *wi = &feature_word_info[w];
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t r = 0;
|
2014-04-30 18:48:32 +02:00
|
|
|
|
2014-04-30 18:48:39 +02:00
|
|
|
if (kvm_enabled()) {
|
2018-10-15 06:47:24 +02:00
|
|
|
switch (wi->type) {
|
|
|
|
case CPUID_FEATURE_WORD:
|
|
|
|
r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
|
|
|
|
wi->cpuid.ecx,
|
|
|
|
wi->cpuid.reg);
|
|
|
|
break;
|
|
|
|
case MSR_FEATURE_WORD:
|
2018-10-15 06:47:25 +02:00
|
|
|
r = kvm_arch_get_supported_msr_feature(kvm_state,
|
|
|
|
wi->msr.index);
|
2018-10-15 06:47:24 +02:00
|
|
|
break;
|
|
|
|
}
|
2017-09-13 11:05:19 +02:00
|
|
|
} else if (hvf_enabled()) {
|
2018-10-15 06:47:24 +02:00
|
|
|
if (wi->type != CPUID_FEATURE_WORD) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
r = hvf_get_supported_cpuid(wi->cpuid.eax,
|
|
|
|
wi->cpuid.ecx,
|
|
|
|
wi->cpuid.reg);
|
2014-04-30 18:48:39 +02:00
|
|
|
} else if (tcg_enabled()) {
|
2014-04-30 18:48:41 +02:00
|
|
|
r = wi->tcg_features;
|
2014-04-30 18:48:39 +02:00
|
|
|
} else {
|
|
|
|
return ~0;
|
|
|
|
}
|
2021-01-25 23:04:01 +01:00
|
|
|
#ifndef TARGET_X86_64
|
|
|
|
if (w == FEAT_8000_0001_EDX) {
|
|
|
|
r &= ~CPUID_EXT2_LM;
|
|
|
|
}
|
|
|
|
#endif
|
2014-04-30 18:48:41 +02:00
|
|
|
if (migratable_only) {
|
|
|
|
r &= x86_cpu_get_migratable_flags(w);
|
|
|
|
}
|
|
|
|
return r;
|
2014-04-30 18:48:32 +02:00
|
|
|
}
|
|
|
|
|
2022-04-29 20:52:52 +02:00
|
|
|
static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index,
|
|
|
|
uint32_t *eax, uint32_t *ebx,
|
|
|
|
uint32_t *ecx, uint32_t *edx)
|
|
|
|
{
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
*eax = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EAX);
|
|
|
|
*ebx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EBX);
|
|
|
|
*ecx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_ECX);
|
|
|
|
*edx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EDX);
|
|
|
|
} else if (hvf_enabled()) {
|
|
|
|
*eax = hvf_get_supported_cpuid(func, index, R_EAX);
|
|
|
|
*ebx = hvf_get_supported_cpuid(func, index, R_EBX);
|
|
|
|
*ecx = hvf_get_supported_cpuid(func, index, R_ECX);
|
|
|
|
*edx = hvf_get_supported_cpuid(func, index, R_EDX);
|
|
|
|
} else {
|
|
|
|
*eax = 0;
|
|
|
|
*ebx = 0;
|
|
|
|
*ecx = 0;
|
|
|
|
*edx = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-29 21:16:28 +02:00
|
|
|
static void x86_cpu_get_cache_cpuid(uint32_t func, uint32_t index,
|
|
|
|
uint32_t *eax, uint32_t *ebx,
|
|
|
|
uint32_t *ecx, uint32_t *edx)
|
|
|
|
{
|
|
|
|
uint32_t level, unused;
|
|
|
|
|
|
|
|
/* Only return valid host leaves. */
|
|
|
|
switch (func) {
|
|
|
|
case 2:
|
|
|
|
case 4:
|
|
|
|
host_cpuid(0, 0, &level, &unused, &unused, &unused);
|
|
|
|
break;
|
|
|
|
case 0x80000005:
|
|
|
|
case 0x80000006:
|
|
|
|
case 0x8000001d:
|
|
|
|
host_cpuid(0x80000000, 0, &level, &unused, &unused, &unused);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (func > level) {
|
|
|
|
*eax = 0;
|
|
|
|
*ebx = 0;
|
|
|
|
*ecx = 0;
|
|
|
|
*edx = 0;
|
|
|
|
} else {
|
|
|
|
host_cpuid(func, index, eax, ebx, ecx, edx);
|
|
|
|
}
|
|
|
|
}

/*
 * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
 */
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->prop, pv->value,
                              &error_abort);
    }
}

/*
 * Apply properties for the CPU model version specified in model.
 * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
 */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->prop, p->value,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}

/*
 * Load data from X86CPUDefinition into a X86CPU object.
 * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    /* NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), "min-level", def->level,
                             &error_abort);
    object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel,
                             &error_abort);

    object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort);
    object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort);
    object_property_set_int(OBJECT(cpu), "stepping", def->stepping,
                            &error_abort);
    object_property_set_str(OBJECT(cpu), "model-id", def->model_id,
                            &error_abort);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */

    /*
     * vendor property is set here but then overloaded with the
     * host cpu vendor for KVM and HVF.
     */
    object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort);

    x86_cpu_apply_version_props(cpu, model);

    /*
     * Properties in versioned CPU model are not user specified features.
     * We can simply clear env->user_features here since it will be filled later
     * in x86_cpu_expand_features() based on plus_features and minus_features.
     */
    memset(&env->user_features, 0, sizeof(env->user_features));
}
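
/* Report the architecture name used by the gdbstub for this target. */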
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}
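
/*
 * Class initializer shared by all CPU model types registered from
 * builtin_x86_defs: it stores the X86CPUModel in the class, marks the
 * model as migration-safe, and propagates any deprecation note.
 */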
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUModel *model = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);

    xcc->model = model;
    xcc->migration_safe = true;
    cc->deprecation_note = model->cpudef->deprecation_note;
}
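
/* Register a QOM type named after the CPU model, backed by the given X86CPUModel. */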
static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
{
    g_autofree char *typename = x86_cpu_type_name(name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = model,
    };

    type_register(&ti);
}

/*
 * register builtin_x86_defs;
 * "max", "base" and subclasses ("host") are not registered here.
 * See x86_cpu_register_types for all model registrations.
 */
static void x86_register_cpudef_types(const X86CPUDefinition *def)
{
    X86CPUModel *m;
    const X86CPUVersionDefinition *vdef;

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    /* Unversioned model: */
    m = g_new0(X86CPUModel, 1);
    m->cpudef = def;
    m->version = CPU_VERSION_AUTO;
    m->is_alias = true;
    x86_register_cpu_model_type(def->name, m);

    /* Versioned models: */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        g_autofree char *name =
            x86_cpu_versioned_model_name(def, vdef->version);
        m->cpudef = def;
        m->version = vdef->version;
        m->note = vdef->note;
        x86_register_cpu_model_type(name, m);

        if (vdef->alias) {
            X86CPUModel *am = g_new0(X86CPUModel, 1);
            am->cpudef = def;
            am->version = vdef->version;
            am->is_alias = true;
            x86_register_cpu_model_type(vdef->alias, am);
        }
    }
}
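
/* Number of virtual address bits the guest CPU supports: 57 with LA57, else 48. */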
uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
{
    if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
        return 57; /* 57 bits virtual */
    } else {
        return 48; /* 48 bits virtual */
    }
}
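
/*
 * Compute the CPUID leaf 'index' / sub-leaf 'count' values exposed to the
 * guest and store them in *eax/*ebx/*ecx/*edx.  Out-of-range leaves are
 * clamped to cpuid_level, matching hardware behaviour.
 */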
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];
    X86CPUTopoInfo topo_info;

    topo_info.dies_per_pkg = env->nr_dies;
    topo_info.cores_per_die = cs->nr_cores;
    topo_info.threads_per_core = cs->nr_threads;

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        if (!cpu->enable_pmu) {
            *ecx &= ~CPUID_EXT_PDCM;
        }
        break;
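    /*
     * Leaf 2 returns the legacy one-byte cache/TLB descriptors; QEMU encodes
     * them from the cache_info_cpuid2 model data unless cache info
     * passthrough is enabled.
     */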
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
            *eax = *ebx = *ecx = *edx = 0;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(&topo_info);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            /* Maximum ECX value for sub-leaves */
            *eax = env->cpuid_level_func7;
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */

            /*
             * SGX cannot be emulated in software. If hardware does not
             * support enabling SGX and/or SGX flexible launch control,
             * then we need to update the VM's CPUID values accordingly.
             */
            if ((*ebx & CPUID_7_0_EBX_SGX) &&
                (!kvm_enabled() ||
                 !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_EBX) &
                   CPUID_7_0_EBX_SGX))) {
                *ebx &= ~CPUID_7_0_EBX_SGX;
            }

            if ((*ecx & CPUID_7_0_ECX_SGX_LC) &&
                (!(*ebx & CPUID_7_0_EBX_SGX) || !kvm_enabled() ||
                 !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_ECX) &
                   CPUID_7_0_ECX_SGX_LC))) {
                *ecx &= ~CPUID_7_0_ECX_SGX_LC;
            }
        } else if (count == 1) {
            *eax = env->features[FEAT_7_1_EAX];
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (accel_uses_host_cpuid() && cpu->enable_pmu) {
            x86_cpu_get_supported_cpuid(0xA, count, eax, ebx, ecx, edx);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(&topo_info);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(&topo_info);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
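    /*
     * Leaf 0x1C: Architectural LBR enumeration; only filled from host CPUID
     * when the accelerator uses host CPUID, the PMU is enabled and the
     * ARCH_LBR feature bit is set.
     */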
    case 0x1C:
        if (accel_uses_host_cpuid() && cpu->enable_pmu &&
            (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
            x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx);
            *edx = 0;
        }
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(&topo_info);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(&topo_info);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(&topo_info);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
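    /*
     * Leaf 0xD enumerates XSAVE state components: sub-leaf 0 covers
     * XCR0-managed states, sub-leaf 1 covers XSAVES/XSS, and higher
     * sub-leaves describe the individual save areas.
     */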
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_xcr0_components(cpu), false);
            *eax = env->features[FEAT_XSAVE_XCR0_LO];
            *edx = env->features[FEAT_XSAVE_XCR0_HI];
            /*
             * The initial value of xcr0 and ebx == 0; on a host without the
             * kvm commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even though
             * the guest updates xcr0, which crashes some legacy guests
             * (e.g., CentOS 6).  So set ebx == ecx to work around it.
             */
            *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0, false);
        } else if (count == 1) {
            uint64_t xstate = x86_cpu_xsave_xcr0_components(cpu) |
                              x86_cpu_xsave_xss_components(cpu);

            *eax = env->features[FEAT_XSAVE];
            *ebx = xsave_area_size(xstate, true);
            *ecx = env->features[FEAT_XSAVE_XSS_LO];
            *edx = env->features[FEAT_XSAVE_XSS_HI];
            if (kvm_enabled() && cpu->enable_pmu &&
                (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
                (*eax & CPUID_XSAVE_XSAVES)) {
                *ecx |= XSTATE_ARCH_LBR_MASK;
            } else {
                *ecx &= ~XSTATE_ARCH_LBR_MASK;
            }
        } else if (count == 0xf &&
                   accel_uses_host_cpuid() && cpu->enable_pmu &&
                   (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
            x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];

            if (x86_cpu_xsave_xcr0_components(cpu) & (1ULL << count)) {
                *eax = esa->size;
                *ebx = esa->offset;
                *ecx = esa->ecx &
                       (ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK);
            } else if (x86_cpu_xsave_xss_components(cpu) & (1ULL << count)) {
                *eax = esa->size;
                *ebx = 0;
                *ecx = 1;
            }
        }
        break;
    }
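    /* Leaf 0x12: SGX capability and EPC section enumeration (KVM only). */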
    case 0x12:
#ifndef CONFIG_USER_ONLY
        if (!kvm_enabled() ||
            !(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX)) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        /*
         * SGX sub-leafs CPUID.0x12.{0x2..N} enumerate EPC sections. Retrieve
         * the EPC properties, e.g. confidentiality and integrity, from the
         * host's first EPC section, i.e. assume there is one EPC section or
         * that all EPC sections have the same security properties.
         */
        if (count > 1) {
            uint64_t epc_addr, epc_size;

            if (sgx_epc_get_section(count - 2, &epc_addr, &epc_size)) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
            host_cpuid(index, 2, eax, ebx, ecx, edx);
            *eax = (uint32_t)(epc_addr & 0xfffff000) | 0x1;
            *ebx = (uint32_t)(epc_addr >> 32);
            *ecx = (uint32_t)(epc_size & 0xfffff000) | (*ecx & 0xf);
            *edx = (uint32_t)(epc_size >> 32);
            break;
        }

        /*
         * SGX sub-leafs CPUID.0x12.{0x0,0x1} are heavily dependent on hardware
         * and KVM, i.e. QEMU cannot emulate features to override what KVM
         * supports. Features can be further restricted by userspace, but not
         * made more permissive.
         */
        x86_cpu_get_supported_cpuid(0x12, index, eax, ebx, ecx, edx);

        if (count == 0) {
            *eax &= env->features[FEAT_SGX_12_0_EAX];
            *ebx &= env->features[FEAT_SGX_12_0_EBX];
        } else {
            *eax &= env->features[FEAT_SGX_12_1_EAX];
            *ebx &= 0; /* ebx reserved */
            *ecx &= env->features[FEAT_XSAVE_XSS_LO];
            *edx &= env->features[FEAT_XSAVE_XSS_HI];

            /* FP and SSE are always allowed regardless of XSAVE/XCR0. */
            *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK;

            /* Access to PROVISIONKEY requires additional credentials. */
            if ((*eax & (1U << 4)) &&
                !kvm_enable_sgx_provisioning(cs->kvm_state)) {
                *eax &= ~(1U << 4);
            }
        }
#endif
        break;
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
            if (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP) {
                *ecx |= CPUID_14_0_ECX_LIP;
            }
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x1D: {
        /* AMX TILE */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) {
            break;
        }

        if (count == 0) {
            /* Highest numbered palette subleaf */
            *eax = INTEL_AMX_TILE_MAX_SUBLEAF;
        } else if (count == 1) {
            *eax = INTEL_AMX_TOTAL_TILE_BYTES |
                   (INTEL_AMX_BYTES_PER_TILE << 16);
            *ebx = INTEL_AMX_BYTES_PER_ROW | (INTEL_AMX_TILE_MAX_NAMES << 16);
            *ecx = INTEL_AMX_TILE_MAX_ROWS;
        }
        break;
    }
    case 0x1E: {
        /* AMX TMUL */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) {
            break;
        }

        if (count == 0) {
            /* Highest numbered palette subleaf */
            *ebx = INTEL_AMX_TMUL_MAX_K | (INTEL_AMX_TMUL_MAX_N << 8);
        }
        break;
    }
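    /*
     * 0x40000000 is the hypervisor CPUID range; under TCG a "TCGTCGTCGTCG"
     * signature is advertised unless the expose_tcg flag is cleared.
     */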
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1; /* CmpLegacy bit */
            }
        }
        break;
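    /* Leaves 0x80000002..0x80000004 return the 48-byte processor brand string. */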
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
               (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) |
               (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
               (L2_DTLB_2M_ENTRIES << 16) |
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
               (L2_DTLB_4K_ENTRIES << 16) |
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        *eax = cpu->phys_bits;
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax |= (cpu_x86_virtual_addr_width(env) << 8);
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            /*
             * Bits 15:12 is "The number of bits in the initial
             * Core::X86::Apic::ApicId[ApicId] value that indicate
             * thread ID within a package".
             * Bits 7:0 is "The number of threads in the package is NC+1"
             */
            *ecx = (apicid_pkg_offset(&topo_info) << 12) |
                   ((cs->nr_cores * cs->nr_threads) - 1);
        } else {
            *ecx = 0;
        }
        *edx = 0;
        break;
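    /* Leaf 0x8000000A: SVM revision and feature bits, when SVM is advertised. */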
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        if (cpu->core_id <= 255) {
            encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
|
2011-06-01 03:59:52 +02:00
|
|
|
case 0xC0000000:
|
|
|
|
*eax = env->cpuid_xlevel2;
|
|
|
|
*ebx = 0;
|
|
|
|
*ecx = 0;
|
|
|
|
*edx = 0;
|
|
|
|
break;
|
|
|
|
case 0xC0000001:
|
|
|
|
/* Support for VIA CPU's CPUID instruction */
|
|
|
|
*eax = env->cpuid_version;
|
|
|
|
*ebx = 0;
|
|
|
|
*ecx = 0;
|
2013-04-22 21:00:15 +02:00
|
|
|
*edx = env->features[FEAT_C000_0001_EDX];
|
2011-06-01 03:59:52 +02:00
|
|
|
break;
|
|
|
|
case 0xC0000002:
|
|
|
|
case 0xC0000003:
|
|
|
|
case 0xC0000004:
|
|
|
|
/* Reserved for future use; currently filled with zero */
|
|
|
|
*eax = 0;
|
|
|
|
*ebx = 0;
|
|
|
|
*ecx = 0;
|
|
|
|
*edx = 0;
|
|
|
|
break;
|
2018-03-08 13:48:58 +01:00
|
|
|
case 0x8000001F:
|
2021-10-07 18:17:09 +02:00
|
|
|
*eax = *ebx = *ecx = *edx = 0;
|
|
|
|
if (sev_enabled()) {
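/*
 * EAX bit 1 advertises SEV and bit 3 SEV-ES; EBX[5:0] holds the C-bit
 * position and EBX[11:6] the physical address bit reduction
 * (CPUID Fn8000_001F layout).
 */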
|
|
|
|
*eax = 0x2;
|
|
|
|
*eax |= sev_es_enabled() ? 0x8 : 0;
|
|
|
|
*ebx = sev_get_cbit_position();
|
|
|
|
*ebx |= sev_get_reduced_phys_bits() << 6;
|
|
|
|
}
|
2018-03-08 13:48:58 +01:00
|
|
|
break;
|
2010-03-11 14:38:55 +01:00
|
|
|
default:
|
|
|
|
/* reserved values: zero */
|
|
|
|
*eax = 0;
|
|
|
|
*ebx = 0;
|
|
|
|
*ecx = 0;
|
|
|
|
*edx = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-04-02 23:20:08 +02:00
|
|
|
|
2021-07-19 13:21:13 +02:00
|
|
|
static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env)
|
|
|
|
{
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
/* These default values are the reset values defined by Skylake hardware */
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[0] = 0xa6053e051270b7acULL;
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[1] = 0x6cfbe8ba8b3b413dULL;
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[2] = 0xc4916d99f2b3735dULL;
|
|
|
|
env->msr_ia32_sgxlepubkeyhash[3] = 0xd4f8c05909f9bb3bULL;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2020-03-03 11:05:11 +01:00
|
|
|
static void x86_cpu_reset(DeviceState *dev)
|
2012-04-02 23:20:08 +02:00
|
|
|
{
|
2020-03-03 11:05:11 +01:00
|
|
|
CPUState *s = CPU(dev);
|
2012-04-02 23:20:08 +02:00
|
|
|
X86CPU *cpu = X86_CPU(s);
|
|
|
|
X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
|
|
|
|
CPUX86State *env = &cpu->env;
|
2015-07-07 17:13:10 +02:00
|
|
|
target_ulong cr4;
|
|
|
|
uint64_t xcr0;
|
2012-04-03 00:16:24 +02:00
|
|
|
int i;
|
|
|
|
|
2020-03-03 11:05:11 +01:00
|
|
|
xcc->parent_reset(dev);
|
2012-04-02 23:20:08 +02:00
|
|
|
|
2015-04-24 20:49:15 +02:00
|
|
|
memset(env, 0, offsetof(CPUX86State, end_reset_fields));
|
2012-04-03 00:16:24 +02:00
|
|
|
|
|
|
|
env->old_exception = -1;
|
|
|
|
|
|
|
|
/* init to reset state */
|
2021-08-14 09:51:00 +02:00
|
|
|
env->int_ctl = 0;
|
2012-04-03 00:16:24 +02:00
|
|
|
env->hflags2 |= HF2_GIF_MASK;
|
2021-08-05 13:08:23 +02:00
|
|
|
env->hflags2 |= HF2_VGIF_MASK;
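/* out of reset, GIF (and its virtual copy) is set: interrupts are not globally blocked */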
|
2020-05-20 16:49:22 +02:00
|
|
|
env->hflags &= ~HF_GUEST_MASK;
|
2012-04-03 00:16:24 +02:00
|
|
|
|
|
|
|
cpu_x86_update_cr0(env, 0x60000010);
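/* CR0 reset value 0x60000010: CD, NW and ET set; protection and paging disabled */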
|
|
|
|
env->a20_mask = ~0x0;
|
|
|
|
env->smbase = 0x30000;
|
2018-02-27 11:22:12 +01:00
|
|
|
env->msr_smi_count = 0;
|
2012-04-03 00:16:24 +02:00
|
|
|
|
|
|
|
env->idt.limit = 0xffff;
|
|
|
|
env->gdt.limit = 0xffff;
|
|
|
|
env->ldt.limit = 0xffff;
|
|
|
|
env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
|
|
|
|
env->tr.limit = 0xffff;
|
|
|
|
env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
|
|
|
|
|
|
|
|
cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
|
|
|
|
DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
|
|
|
|
DESC_R_MASK | DESC_A_MASK);
|
|
|
|
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
|
|
|
|
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
|
|
|
DESC_A_MASK);
|
|
|
|
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
|
|
|
|
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
|
|
|
DESC_A_MASK);
|
|
|
|
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
|
|
|
|
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
|
|
|
DESC_A_MASK);
|
|
|
|
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
|
|
|
|
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
|
|
|
DESC_A_MASK);
|
|
|
|
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
|
|
|
|
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
|
|
|
|
DESC_A_MASK);
|
|
|
|
|
|
|
|
env->eip = 0xfff0;
|
|
|
|
env->regs[R_EDX] = env->cpuid_version;
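/* CS base 0xffff0000 + EIP 0xfff0 is the architectural reset vector 0xfffffff0; EDX carries the CPU signature */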
|
|
|
|
|
|
|
|
env->eflags = 0x2;
|
|
|
|
|
|
|
|
/* FPU init */
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
env->fptags[i] = 1;
|
|
|
|
}
|
2014-09-17 10:05:19 +02:00
|
|
|
cpu_set_fpuc(env, 0x37f);
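/* x87 control word 0x037f: all exceptions masked, extended (64-bit) precision */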
|
2012-04-03 00:16:24 +02:00
|
|
|
|
|
|
|
env->mxcsr = 0x1f80;
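/* MXCSR reset value 0x1f80: all SSE exceptions masked, no status flags set */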
|
2015-07-07 17:13:10 +02:00
|
|
|
/* All units are in INIT state. */
|
|
|
|
env->xstate_bv = 0;
|
2012-04-03 00:16:24 +02:00
|
|
|
|
|
|
|
env->pat = 0x0007040600070406ULL;
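/* power-on PAT: WB, WT, UC- and UC repeated across the eight entries */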
|
2022-03-24 09:21:41 +01:00
|
|
|
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
/*
|
|
|
|
* KVM handles TSC = 0 specially and thinks we are hot-plugging
|
|
|
|
* a new CPU; use 1 instead to force a reset.
|
|
|
|
*/
|
|
|
|
if (env->tsc != 0) {
|
|
|
|
env->tsc = 1;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
env->tsc = 0;
|
|
|
|
}
|
|
|
|
|
2012-04-03 00:16:24 +02:00
|
|
|
env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
|
2019-05-14 08:06:39 +02:00
|
|
|
if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
|
|
|
|
env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
|
|
|
|
}
|
2012-04-03 00:16:24 +02:00
|
|
|
|
|
|
|
memset(env->dr, 0, sizeof(env->dr));
|
|
|
|
env->dr[6] = DR6_FIXED_1;
|
|
|
|
env->dr[7] = DR7_FIXED_1;
|
2013-09-02 17:26:20 +02:00
|
|
|
cpu_breakpoint_remove_all(s, BP_CPU);
|
2013-09-02 16:57:02 +02:00
|
|
|
cpu_watchpoint_remove_all(s, BP_CPU);
|
2012-07-23 15:22:27 +02:00
|
|
|
|
2015-07-07 17:13:10 +02:00
|
|
|
cr4 = 0;
|
2016-02-17 10:54:53 +01:00
|
|
|
xcr0 = XSTATE_FP_MASK;
|
2015-07-07 17:13:10 +02:00
|
|
|
|
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
/* Enable all the features for user-mode. */
|
|
|
|
if (env->features[FEAT_1_EDX] & CPUID_SSE) {
|
2016-02-17 10:54:53 +01:00
|
|
|
xcr0 |= XSTATE_SSE_MASK;
|
2015-07-07 17:13:10 +02:00
|
|
|
}
|
2016-02-09 14:14:28 +01:00
|
|
|
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
|
|
|
|
const ExtSaveArea *esa = &x86_ext_save_areas[i];
|
2022-02-15 20:52:54 +01:00
|
|
|
if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) {
|
|
|
|
continue;
|
|
|
|
}
|
2016-09-22 23:58:39 +02:00
|
|
|
if (env->features[esa->feature] & esa->bits) {
|
2016-02-09 14:14:28 +01:00
|
|
|
xcr0 |= 1ull << i;
|
|
|
|
}
|
2015-07-07 17:13:10 +02:00
|
|
|
}
|
2016-02-09 14:14:28 +01:00
|
|
|
|
2015-07-07 17:13:10 +02:00
|
|
|
if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
|
|
|
|
cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
|
|
|
|
}
|
2015-11-18 12:55:47 +01:00
|
|
|
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
|
|
|
|
cr4 |= CR4_FSGSBASE_MASK;
|
|
|
|
}
|
2015-07-07 17:13:10 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
env->xcr0 = xcr0;
|
|
|
|
cpu_x86_update_cr4(env, cr4);
|
2013-12-06 09:33:01 +01:00
|
|
|
|
2014-08-14 23:39:39 +02:00
|
|
|
/*
|
|
|
|
* SDM 11.11.5 requires:
|
|
|
|
* - IA32_MTRR_DEF_TYPE MSR.E = 0
|
|
|
|
* - IA32_MTRR_PHYSMASKn.V = 0
|
|
|
|
* All other bits are undefined. For simplification, zero it all.
|
|
|
|
*/
|
|
|
|
env->mtrr_deftype = 0;
|
|
|
|
memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
|
|
|
|
memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
|
|
|
|
|
2017-09-13 11:05:21 +02:00
|
|
|
env->interrupt_injected = -1;
|
2019-06-19 18:21:39 +02:00
|
|
|
env->exception_nr = -1;
|
|
|
|
env->exception_pending = 0;
|
|
|
|
env->exception_injected = 0;
|
|
|
|
env->exception_has_payload = false;
|
|
|
|
env->exception_payload = 0;
|
2017-09-13 11:05:21 +02:00
|
|
|
env->nmi_injected = false;
|
2012-07-23 15:22:27 +02:00
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
|
|
|
/* We hard-wire the BSP to the first CPU. */
|
2015-04-02 01:58:36 +02:00
|
|
|
apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
|
2012-07-23 15:22:27 +02:00
|
|
|
|
2013-01-17 18:51:17 +01:00
|
|
|
s->halted = !cpu_is_bsp(cpu);
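/* non-BSP CPUs start halted, waiting for INIT/SIPI from the BSP */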
|
2013-03-20 13:11:56 +01:00
|
|
|
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
kvm_arch_reset_vcpu(cpu);
|
|
|
|
}
|
2021-07-19 13:21:13 +02:00
|
|
|
|
|
|
|
x86_cpu_set_sgxlepubkeyhash(env);
|
2021-11-01 14:23:00 +01:00
|
|
|
|
2022-02-23 12:58:24 +01:00
|
|
|
env->amd_tsc_scale_msr = MSR_AMD64_TSC_RATIO_DEFAULT;
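/* the TSC ratio MSR is an 8.32 fixed-point multiplier; the default encodes 1.0 */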
|
2021-11-01 14:23:00 +01:00
|
|
|
|
2012-07-23 15:22:27 +02:00
|
|
|
#endif
|
2012-04-02 23:20:08 +02:00
|
|
|
}
|
|
|
|
|
2012-04-03 00:00:17 +02:00
|
|
|
static void mce_init(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *cenv = &cpu->env;
|
|
|
|
unsigned int bank;
|
|
|
|
|
|
|
|
if (((cenv->cpuid_version >> 8) & 0xf) >= 6
|
2013-04-22 21:00:15 +02:00
|
|
|
&& (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
|
2012-04-03 00:00:17 +02:00
|
|
|
(CPUID_MCE | CPUID_MCA)) {
|
2016-06-22 08:56:21 +02:00
|
|
|
cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
|
|
|
|
(cpu->enable_lmce ? MCG_LMCE_P : 0);
|
2012-04-03 00:00:17 +02:00
|
|
|
cenv->mcg_ctl = ~(uint64_t)0;
|
|
|
|
for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
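/* each bank uses four MSR slots (CTL/STATUS/ADDR/MISC); enable all errors in MCi_CTL */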
|
|
|
|
cenv->mce_banks[bank * 4] = ~(uint64_t)0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-21 18:30:12 +02:00
|
|
|
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
|
|
|
|
{
|
|
|
|
if (*min < value) {
|
|
|
|
*min = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
|
|
|
|
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
FeatureWordInfo *fi = &feature_word_info[w];
|
2018-10-15 06:47:24 +02:00
|
|
|
uint32_t eax = fi->cpuid.eax;
|
2016-09-21 18:30:12 +02:00
|
|
|
uint32_t region = eax & 0xF0000000;
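/* the top nibble selects the CPUID range: 0 = basic, 8 = extended, C = Centaur/VIA */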
|
|
|
|
|
2018-10-15 06:47:24 +02:00
|
|
|
assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
|
2016-09-21 18:30:12 +02:00
|
|
|
if (!env->features[w]) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (region) {
|
|
|
|
case 0x00000000:
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
|
|
|
|
break;
|
|
|
|
case 0x80000000:
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
|
|
|
|
break;
|
|
|
|
case 0xC0000000:
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
|
|
|
|
break;
|
|
|
|
}
|
2019-07-25 08:14:16 +02:00
|
|
|
|
|
|
|
if (eax == 7) {
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
|
|
|
|
fi->cpuid.ecx);
|
|
|
|
}
|
2016-09-21 18:30:12 +02:00
|
|
|
}
|
|
|
|
|
2016-09-22 22:27:56 +02:00
|
|
|
/* Calculate XSAVE components based on the configured CPU feature flags */
|
|
|
|
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
int i;
|
2016-09-22 22:41:35 +02:00
|
|
|
uint64_t mask;
|
2022-02-17 07:04:29 +01:00
|
|
|
static bool request_perm;
|
2016-09-22 22:27:56 +02:00
|
|
|
|
|
|
|
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
|
2022-02-15 20:52:54 +01:00
|
|
|
env->features[FEAT_XSAVE_XCR0_LO] = 0;
|
|
|
|
env->features[FEAT_XSAVE_XCR0_HI] = 0;
|
2016-09-22 22:27:56 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-09-30 20:49:42 +02:00
|
|
|
mask = 0;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
|
2016-09-22 22:27:56 +02:00
|
|
|
const ExtSaveArea *esa = &x86_ext_save_areas[i];
|
|
|
|
if (env->features[esa->feature] & esa->bits) {
|
2016-09-22 22:41:35 +02:00
|
|
|
mask |= (1ULL << i);
|
2016-09-22 22:27:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-17 07:04:29 +01:00
|
|
|
/* Only request permission for the first vCPU */
|
|
|
|
if (kvm_enabled() && !request_perm) {
|
|
|
|
kvm_request_xsave_components(cpu, mask);
|
|
|
|
request_perm = true;
|
|
|
|
}
|
|
|
|
|
2022-02-15 20:52:54 +01:00
|
|
|
env->features[FEAT_XSAVE_XCR0_LO] = mask & CPUID_XSTATE_XCR0_MASK;
|
|
|
|
env->features[FEAT_XSAVE_XCR0_HI] = mask >> 32;
|
|
|
|
env->features[FEAT_XSAVE_XSS_LO] = mask & CPUID_XSTATE_XSS_MASK;
|
|
|
|
env->features[FEAT_XSAVE_XSS_HI] = mask >> 32;
|
2016-09-22 22:27:56 +02:00
|
|
|
}
|
|
|
|
|
2017-01-16 22:11:21 +01:00
|
|
|
/***** Steps involved in loading and filtering CPUID data
|
|
|
|
*
|
|
|
|
* When initializing and realizing a CPU object, the steps
|
|
|
|
* involved in setting up CPUID data are:
|
|
|
|
*
|
|
|
|
* 1) Loading CPU model definition (X86CPUDefinition). This is
|
2019-06-28 02:28:39 +02:00
|
|
|
* implemented by x86_cpu_load_model() and should be completely
|
2017-01-16 22:11:21 +01:00
|
|
|
* transparent, as it is done automatically by instance_init.
|
|
|
|
* No code should need to look at X86CPUDefinition structs
|
|
|
|
* outside instance_init.
|
|
|
|
*
|
|
|
|
* 2) CPU expansion. This is done by realize before CPUID
|
|
|
|
* filtering, and will make sure host/accelerator data is
|
|
|
|
* loaded for CPU models that depend on host capabilities
|
|
|
|
* (e.g. "host"). Done by x86_cpu_expand_features().
|
|
|
|
*
|
|
|
|
* 3) CPUID filtering. This initializes extra data related to
|
|
|
|
* CPUID, and checks if the host supports all capabilities
|
|
|
|
* required by the CPU. Runnability of a CPU model is
|
|
|
|
* determined at this step. Done by x86_cpu_filter_features().
|
|
|
|
*
|
|
|
|
* Some operations don't require all steps to be performed.
|
|
|
|
* More precisely:
|
|
|
|
*
|
|
|
|
* - CPU instance creation (instance_init) will run only CPU
|
|
|
|
* model loading. CPU expansion can't run at instance_init-time
|
|
|
|
* because host/accelerator data may not be available yet.
|
|
|
|
* - CPU realization will perform both CPU model expansion and CPUID
|
|
|
|
* filtering, and return an error in case one of them fails.
|
|
|
|
* - query-cpu-definitions needs to run all 3 steps. It needs
|
|
|
|
* to run CPUID filtering, as the 'unavailable-features'
|
|
|
|
* field is set based on the filtering results.
|
|
|
|
* - The query-cpu-model-expansion QMP command only needs to run
|
|
|
|
* CPU model loading and CPU expansion. It should not filter
|
|
|
|
* any CPUID data based on host capabilities.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Expand CPU configuration data, based on configured features
|
|
|
|
* and host/accelerator capabilities when appropriate.
|
|
|
|
*/
|
2021-03-22 14:27:54 +01:00
|
|
|
void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
|
2012-05-09 23:15:32 +02:00
|
|
|
{
|
2012-10-22 17:03:00 +02:00
|
|
|
CPUX86State *env = &cpu->env;
|
2016-06-06 17:16:44 +02:00
|
|
|
FeatureWord w;
|
2019-07-01 17:26:45 +02:00
|
|
|
int i;
|
2016-09-30 20:49:39 +02:00
|
|
|
GList *l;
|
2014-12-19 02:31:11 +01:00
|
|
|
|
2019-07-01 17:26:45 +02:00
|
|
|
for (l = plus_features; l; l = l->next) {
|
|
|
|
const char *prop = l->data;
|
2020-07-07 18:06:04 +02:00
|
|
|
if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) {
|
|
|
|
return;
|
2019-07-01 17:26:45 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (l = minus_features; l; l = l->next) {
|
|
|
|
const char *prop = l->data;
|
2020-07-07 18:06:04 +02:00
|
|
|
if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) {
|
|
|
|
return;
|
2019-07-01 17:26:45 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-27 16:48:15 +02:00
|
|
|
/* TODO: Now cpu->max_features doesn't overwrite features
|
|
|
|
* set using QOM properties, and we can convert
|
2016-06-06 17:16:44 +02:00
|
|
|
* plus_features & minus_features to global properties
|
|
|
|
* inside x86_cpu_parse_featurestr() too.
|
|
|
|
*/
|
2017-01-19 22:04:46 +01:00
|
|
|
if (cpu->max_features) {
|
2016-06-06 17:16:44 +02:00
|
|
|
for (w = 0; w < FEATURE_WORDS; w++) {
|
2017-03-27 16:48:15 +02:00
|
|
|
/* Override only features that weren't set explicitly
|
|
|
|
* by the user.
|
|
|
|
*/
|
|
|
|
env->features[w] |=
|
|
|
|
x86_cpu_get_supported_feature_word(w, cpu->migratable) &
|
2020-04-13 00:35:56 +02:00
|
|
|
~env->user_features[w] &
|
2018-04-10 23:15:34 +02:00
|
|
|
~feature_word_info[w].no_autoenable_flags;
|
2016-06-06 17:16:44 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-01 17:26:45 +02:00
|
|
|
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
|
|
|
|
FeatureDep *d = &feature_dependencies[i];
|
|
|
|
if (!(env->features[d->from.index] & d->from.mask)) {
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
|
2016-09-30 20:49:39 +02:00
|
|
|
|
2019-07-01 17:26:45 +02:00
|
|
|
/* Not an error unless the dependent feature was added explicitly. */
|
|
|
|
mark_unavailable_features(cpu, d->to.index,
|
|
|
|
unavailable_features & env->user_features[d->to.index],
|
|
|
|
"This feature depends on other features that were not requested");
|
|
|
|
|
|
|
|
env->features[d->to.index] &= ~unavailable_features;
|
2016-09-30 20:49:39 +02:00
|
|
|
}
|
2016-06-06 17:16:44 +02:00
|
|
|
}
|
|
|
|
|
2016-09-27 00:11:14 +02:00
|
|
|
if (!kvm_enabled() || !cpu->expose_kvm) {
|
|
|
|
env->features[FEAT_KVM] = 0;
|
|
|
|
}
|
|
|
|
|
2016-09-22 22:27:56 +02:00
|
|
|
x86_cpu_enable_xsave_components(cpu);
|
2016-09-21 18:30:12 +02:00
|
|
|
|
|
|
|
/* CPUID[EAX=7,ECX=0].EBX always increases the minimum level automatically: */
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
|
|
|
|
if (cpu->full_cpuid_auto_level) {
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
|
2019-07-25 08:14:16 +02:00
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
|
2016-09-21 18:30:12 +02:00
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
|
2018-01-09 16:45:16 +01:00
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
|
2016-09-21 18:30:12 +02:00
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
|
|
|
|
x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
|
2019-01-30 00:52:59 +01:00
|
|
|
|
|
|
|
/* Intel Processor Trace requires CPUID[0x14] */
|
2020-03-12 17:48:06 +01:00
|
|
|
if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
|
|
|
|
if (cpu->intel_pt_auto_level) {
|
|
|
|
x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
|
|
|
|
} else if (cpu->env.cpuid_min_level < 0x14) {
|
|
|
|
mark_unavailable_features(cpu, FEAT_7_0_EBX,
|
|
|
|
CPUID_7_0_EBX_INTEL_PT,
|
2021-02-16 20:10:27 +01:00
|
|
|
"Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,intel-pt=on,min-level=0x14\"");
|
2020-03-12 17:48:06 +01:00
|
|
|
}
|
2019-01-30 00:52:59 +01:00
|
|
|
}
|
|
|
|
|
2021-07-08 19:06:41 +02:00
|
|
|
/*
|
|
|
|
* Intel CPU topology with multi-die support requires CPUID[0x1F].
|
|
|
|
* For AMD Rome/Milan, the CPUID level is 0x10, and the guest OS should detect
|
|
|
|
* the extended topology via leaf 0xB. Only adjust it for Intel CPUs, unless
|
|
|
|
* cpu->vendor_cpuid_only has been unset for compatibility with older
|
|
|
|
* machine types.
|
|
|
|
*/
|
|
|
|
if ((env->nr_dies > 1) &&
|
|
|
|
(IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
|
2019-06-20 07:45:23 +02:00
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
|
|
|
|
}
|
|
|
|
|
2016-09-21 20:01:35 +02:00
|
|
|
/* SVM requires CPUID[0x8000000A] */
|
|
|
|
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
|
|
|
|
}
|
2018-03-08 13:48:58 +01:00
|
|
|
|
|
|
|
/* SEV requires CPUID[0x8000001F] */
|
|
|
|
if (sev_enabled()) {
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
|
|
|
|
}
|
2021-07-19 13:21:18 +02:00
|
|
|
|
|
|
|
/* SGX requires CPUID[0x12] for EPC enumeration */
|
|
|
|
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) {
|
|
|
|
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12);
|
|
|
|
}
|
2016-09-21 18:30:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
|
2019-07-25 08:14:16 +02:00
|
|
|
if (env->cpuid_level_func7 == UINT32_MAX) {
|
|
|
|
env->cpuid_level_func7 = env->cpuid_min_level_func7;
|
|
|
|
}
|
2016-09-21 18:30:12 +02:00
|
|
|
if (env->cpuid_level == UINT32_MAX) {
|
|
|
|
env->cpuid_level = env->cpuid_min_level;
|
|
|
|
}
|
|
|
|
if (env->cpuid_xlevel == UINT32_MAX) {
|
|
|
|
env->cpuid_xlevel = env->cpuid_min_xlevel;
|
|
|
|
}
|
|
|
|
if (env->cpuid_xlevel2 == UINT32_MAX) {
|
|
|
|
env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
|
2012-10-22 17:03:00 +02:00
|
|
|
}
|
2021-06-08 14:08:13 +02:00
|
|
|
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
kvm_hyperv_expand_features(cpu, errp);
|
|
|
|
}
|
2016-09-28 20:03:26 +02:00
|
|
|
}
|
|
|
|
|
2017-01-16 22:11:21 +01:00
|
|
|
/*
|
|
|
|
* Finishes initialization of CPUID data, filters CPU feature
|
|
|
|
* words based on host availability of each feature.
|
|
|
|
*
|
|
|
|
* Unsupported feature bits are cleared and reported via mark_unavailable_features().
|
|
|
|
*/
|
2019-07-02 15:32:41 +02:00
|
|
|
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
|
2017-01-16 22:11:21 +01:00
|
|
|
{
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
FeatureWord w;
|
2019-07-02 15:32:41 +02:00
|
|
|
const char *prefix = NULL;
|
|
|
|
|
|
|
|
if (verbose) {
|
|
|
|
prefix = accel_uses_host_cpuid()
|
|
|
|
? "host doesn't support requested feature"
|
|
|
|
: "TCG doesn't support requested feature";
|
|
|
|
}
|
2017-01-16 22:11:21 +01:00
|
|
|
|
|
|
|
for (w = 0; w < FEATURE_WORDS; w++) {
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t host_feat =
|
2017-01-16 22:11:21 +01:00
|
|
|
x86_cpu_get_supported_feature_word(w, false);
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t requested_features = env->features[w];
|
|
|
|
uint64_t unavailable_features = requested_features & ~host_feat;
|
2019-07-02 15:32:41 +02:00
|
|
|
mark_unavailable_features(cpu, w, unavailable_features, prefix);
|
2017-01-16 22:11:21 +01:00
|
|
|
}
|
|
|
|
|
2018-03-04 17:48:35 +01:00
|
|
|
if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
|
|
|
|
kvm_enabled()) {
|
|
|
|
KVMState *s = CPU(cpu)->kvm_state;
|
|
|
|
uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
|
|
|
|
uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
|
|
|
|
uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
|
|
|
|
uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
|
|
|
|
uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
|
|
|
|
|
|
|
|
if (!eax_0 ||
|
|
|
|
((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
|
|
|
|
((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
|
|
|
|
((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
|
|
|
|
((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
|
|
|
|
INTEL_PT_ADDR_RANGES_NUM) ||
|
|
|
|
((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
|
2018-03-13 20:26:31 +01:00
|
|
|
(INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
|
2020-12-02 11:10:42 +01:00
|
|
|
((ecx_0 & CPUID_14_0_ECX_LIP) !=
|
|
|
|
(env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP))) {
|
2018-03-04 17:48:35 +01:00
|
|
|
/*
|
|
|
|
* Processor Trace capabilities aren't configurable, so if the
|
|
|
|
* host can't emulate the capabilities we report on
|
|
|
|
* cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
|
|
|
|
*/
|
2019-07-02 15:32:41 +02:00
|
|
|
mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
|
2018-03-04 17:48:35 +01:00
|
|
|
}
|
|
|
|
}
|
2017-01-16 22:11:21 +01:00
|
|
|
}
|
|
|
|
|
2020-11-19 11:32:17 +01:00
|
|
|
static void x86_cpu_hyperv_realize(X86CPU *cpu)
|
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
|
|
|
|
/* Hyper-V vendor id */
|
|
|
|
if (!cpu->hyperv_vendor) {
|
2021-04-22 18:11:12 +02:00
|
|
|
object_property_set_str(OBJECT(cpu), "hv-vendor-id", "Microsoft Hv",
|
|
|
|
&error_abort);
|
|
|
|
}
|
|
|
|
len = strlen(cpu->hyperv_vendor);
|
|
|
|
if (len > 12) {
|
|
|
|
warn_report("hv-vendor-id truncated to 12 characters");
|
|
|
|
len = 12;
|
2020-11-19 11:32:17 +01:00
|
|
|
}
|
2021-04-22 18:11:12 +02:00
|
|
|
memset(cpu->hyperv_vendor_id, 0, 12);
|
|
|
|
memcpy(cpu->hyperv_vendor_id, cpu->hyperv_vendor, len);
|
2020-11-19 11:32:18 +01:00
|
|
|
|
|
|
|
/* 'Hv#1' interface identification */
|
|
|
|
cpu->hyperv_interface_id[0] = 0x31237648;
|
|
|
|
cpu->hyperv_interface_id[1] = 0;
|
|
|
|
cpu->hyperv_interface_id[2] = 0;
|
|
|
|
cpu->hyperv_interface_id[3] = 0;
|
2020-11-19 11:32:19 +01:00
|
|
|
|
2020-11-19 11:32:20 +01:00
|
|
|
/* Hypervisor implementation limits */
|
|
|
|
cpu->hyperv_limits[0] = 64;
|
|
|
|
cpu->hyperv_limits[1] = 0;
|
|
|
|
cpu->hyperv_limits[2] = 0;
|
2020-11-19 11:32:17 +01:00
|
|
|
}
|
|
|
|
|
2016-09-28 20:03:26 +02:00
|
|
|
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
|
|
|
|
{
|
|
|
|
CPUState *cs = CPU(dev);
|
|
|
|
X86CPU *cpu = X86_CPU(dev);
|
|
|
|
X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
Error *local_err = NULL;
|
|
|
|
static bool ht_warned;
|
2022-02-15 20:52:52 +01:00
|
|
|
unsigned requested_lbr_fmt;
|
2016-09-28 20:03:26 +02:00
|
|
|
|
|
|
|
if (cpu->apic_id == UNASSIGNED_APIC_ID) {
|
|
|
|
error_setg(errp, "apic-id property was not initialized properly");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-06-03 14:30:00 +02:00
|
|
|
/*
|
|
|
|
* Process Hyper-V enlightenments.
|
|
|
|
* Note: this currently has to happen before the expansion of CPU features.
|
|
|
|
*/
|
|
|
|
x86_cpu_hyperv_realize(cpu);
|
|
|
|
|
2017-01-16 22:11:21 +01:00
|
|
|
x86_cpu_expand_features(cpu, &local_err);
|
2016-09-28 20:03:26 +02:00
|
|
|
if (local_err) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2022-02-15 20:52:52 +01:00
|
|
|
/*
|
|
|
|
* Override env->features[FEAT_PERF_CAPABILITIES].LBR_FMT
|
|
|
|
* with user-provided setting.
|
|
|
|
*/
|
|
|
|
if (cpu->lbr_fmt != ~PERF_CAP_LBR_FMT) {
|
|
|
|
if ((cpu->lbr_fmt & PERF_CAP_LBR_FMT) != cpu->lbr_fmt) {
|
|
|
|
error_setg(errp, "invalid lbr-fmt");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
env->features[FEAT_PERF_CAPABILITIES] &= ~PERF_CAP_LBR_FMT;
|
|
|
|
env->features[FEAT_PERF_CAPABILITIES] |= cpu->lbr_fmt;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vPMU LBR is supported when 1) KVM is enabled, 2) the pmu=on option is set, and
|
|
|
|
* 3) the vPMU LBR format matches the host's.
|
|
|
|
*/
|
|
|
|
requested_lbr_fmt =
|
|
|
|
env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT;
|
|
|
|
if (requested_lbr_fmt && kvm_enabled()) {
|
|
|
|
uint64_t host_perf_cap =
|
|
|
|
x86_cpu_get_supported_feature_word(FEAT_PERF_CAPABILITIES, false);
|
|
|
|
unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT;
|
|
|
|
|
|
|
|
if (!cpu->enable_pmu) {
|
|
|
|
error_setg(errp, "vPMU: LBR is unsupported without pmu=on");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (requested_lbr_fmt != host_lbr_fmt) {
|
|
|
|
error_setg(errp, "vPMU: the lbr-fmt value (0x%x) does not match "
|
|
|
|
"the host value (0x%x).",
|
|
|
|
requested_lbr_fmt, host_lbr_fmt);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-02 15:32:41 +02:00
|
|
|
x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
|
|
|
|
|
|
|
|
if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
|
|
|
|
error_setg(&local_err,
|
|
|
|
accel_uses_host_cpuid() ?
|
|
|
|
"Host doesn't support requested features" :
|
|
|
|
"TCG doesn't support requested features");
|
|
|
|
goto out;
|
2016-04-15 19:54:26 +02:00
|
|
|
}
|
|
|
|
|
2012-12-28 21:01:17 +01:00
|
|
|
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
|
|
|
|
* CPUID[1].EDX.
|
|
|
|
*/
|
2014-10-21 17:00:45 +02:00
|
|
|
if (IS_AMD_CPU(env)) {
|
2013-04-22 21:00:15 +02:00
|
|
|
env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
|
|
|
|
env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
|
2012-12-28 21:01:17 +01:00
|
|
|
& CPUID_EXT2_AMD_ALIASES);
|
|
|
|
}
|
|
|
|
|
2021-07-19 13:21:13 +02:00
|
|
|
x86_cpu_set_sgxlepubkeyhash(env);
|
|
|
|
|
2021-06-03 14:30:00 +02:00
|
|
|
/*
|
|
|
|
* note: the call to the framework needs to happen after feature expansion,
|
|
|
|
* but before the checks/modifications to ucode_rev, mwait, phys_bits.
|
|
|
|
* These may be set by the accel-specific code,
|
|
|
|
* and the results are subsequently checked / assumed in this function.
|
|
|
|
*/
|
|
|
|
cpu_exec_realizefn(cs, &local_err);
|
|
|
|
if (local_err != NULL) {
|
|
|
|
error_propagate(errp, local_err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
|
|
|
|
g_autofree char *name = x86_cpu_class_get_model_name(xcc);
|
|
|
|
error_setg(&local_err, "CPU model '%s' requires KVM or HVF", name);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cpu->ucode_rev == 0) {
|
|
|
|
/*
|
|
|
|
* The default is the same as KVM's. Note that this check
|
|
|
|
* needs to happen after the eventual setting of ucode_rev in
|
|
|
|
* accel-specific code in cpu_exec_realizefn.
|
|
|
|
*/
|
|
|
|
if (IS_AMD_CPU(env)) {
|
|
|
|
cpu->ucode_rev = 0x01000065;
|
|
|
|
} else {
|
|
|
|
cpu->ucode_rev = 0x100000000ULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* mwait extended info: needed for Core compatibility
|
|
|
|
* We always wake on interrupt even if host does not have the capability.
|
|
|
|
*
|
|
|
|
* This requires the accel-specific code in cpu_exec_realizefn to
|
|
|
|
* have already acquired the CPUID data into cpu->mwait.
|
|
|
|
*/
|
|
|
|
cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
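/* CPUID.05H ECX bit 0 (EMX) enumerates MWAIT extensions, bit 1 (IBE) allows interrupts as break events */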
|
|
|
|
|
2016-07-11 21:28:46 +02:00
|
|
|
/* For 64-bit systems, think about the number of physical bits to present.
|
|
|
|
* Ideally this should be the same as the host; anything other than matching
|
|
|
|
* the host can cause incorrect guest behaviour.
|
|
|
|
* QEMU used to pick the magic value of 40 bits that corresponds to
|
|
|
|
* consumer AMD devices but nothing else.
|
2021-06-03 14:30:00 +02:00
|
|
|
*
|
|
|
|
* Note that this code assumes features expansion has already been done
|
|
|
|
* (as it checks for CPUID_EXT2_LM), and also assumes that potential
|
|
|
|
* phys_bits adjustments to match the host have been already done in
|
|
|
|
* accel-specific code in cpu_exec_realizefn.
|
2016-07-11 21:28:46 +02:00
|
|
|
*/
|
2016-07-08 17:01:36 +02:00
|
|
|
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
|
2021-03-18 14:35:56 +01:00
|
|
|
if (cpu->phys_bits &&
|
|
|
|
(cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
|
|
|
|
cpu->phys_bits < 32)) {
|
|
|
|
error_setg(errp, "phys-bits should be between 32 and %u "
|
|
|
|
" (but is %u)",
|
|
|
|
TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
|
|
|
|
return;
|
2016-07-08 17:01:36 +02:00
|
|
|
}
|
2021-03-22 14:27:40 +01:00
|
|
|
/*
|
|
|
|
* 0 means it was not explicitly set by the user (or by machine
|
|
|
|
* compat_props or by the host code in host-cpu.c).
|
|
|
|
* In this case, the default is the value used by TCG (40).
|
2016-07-11 21:28:46 +02:00
|
|
|
*/
|
|
|
|
if (cpu->phys_bits == 0) {
|
|
|
|
cpu->phys_bits = TCG_PHYS_ADDR_BITS;
|
|
|
|
}
|
2016-07-08 17:01:36 +02:00
|
|
|
} else {
|
|
|
|
/* For 32-bit systems, don't use the user-set value, but keep
|
|
|
|
* phys_bits consistent with what we tell the guest.
|
|
|
|
*/
|
|
|
|
if (cpu->phys_bits != 0) {
|
|
|
|
error_setg(errp, "phys-bits is not user-configurable in 32 bit");
|
|
|
|
return;
|
|
|
|
}
|
2014-04-30 18:48:39 +02:00
|
|
|
|
2016-07-08 17:01:36 +02:00
|
|
|
if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
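/* PSE-36 lets 4 MB pages reference up to 36-bit physical addresses */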
|
|
|
|
cpu->phys_bits = 36;
|
|
|
|
} else {
|
|
|
|
cpu->phys_bits = 32;
|
|
|
|
}
|
|
|
|
}
|
2018-05-24 17:43:30 +02:00
|
|
|
|
|
|
|
/* Cache information initialization */
|
|
|
|
if (!cpu->legacy_cache) {
|
2019-06-28 02:28:39 +02:00
|
|
|
if (!xcc->model || !xcc->model->cpudef->cache_info) {
|
2019-10-25 04:56:32 +02:00
|
|
|
g_autofree char *name = x86_cpu_class_get_model_name(xcc);
|
2018-05-24 17:43:30 +02:00
|
|
|
error_setg(errp,
|
|
|
|
"CPU model '%s' doesn't support legacy-cache=off", name);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
|
2019-06-28 02:28:39 +02:00
|
|
|
*xcc->model->cpudef->cache_info;
|
2018-05-24 17:43:30 +02:00
|
|
|
} else {
|
|
|
|
/* Build legacy cache information */
|
|
|
|
env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
|
|
|
|
env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
|
|
|
|
env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
|
|
|
|
env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
|
|
|
|
|
|
|
|
env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
|
|
|
|
env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
|
|
|
|
env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
|
|
|
|
env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
|
|
|
|
|
|
|
|
env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
|
|
|
|
env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
|
|
|
|
env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
|
|
|
|
env->cache_info_amd.l3_cache = &legacy_l3_cache;
|
|
|
|
}
|
|
|
|
|
2012-07-23 15:22:28 +02:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2019-05-18 22:54:25 +02:00
|
|
|
MachineState *ms = MACHINE(qdev_get_machine());
|
2012-07-23 15:22:28 +02:00
|
|
|
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
|
2012-10-13 22:35:39 +02:00
|
|
|
|
2019-05-18 22:54:25 +02:00
|
|
|
if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
|
2013-04-05 16:36:54 +02:00
|
|
|
x86_cpu_apic_create(cpu, &local_err);
|
2013-01-16 03:41:47 +01:00
|
|
|
if (local_err != NULL) {
|
2013-04-05 16:36:53 +02:00
|
|
|
goto out;
|
2012-10-13 22:35:39 +02:00
|
|
|
}
|
|
|
|
}
|
2012-07-23 15:22:28 +02:00
|
|
|
#endif
|
|
|
|
|
2012-05-09 23:15:32 +02:00
|
|
|
mce_init(cpu);
|
2015-03-31 14:11:09 +02:00
|
|
|
|
2013-07-27 02:53:25 +02:00
|
|
|
qemu_init_vcpu(cs);
|
2013-04-05 16:36:54 +02:00
|
|
|
|
2018-06-19 23:31:59 +02:00
|
|
|
/*
|
|
|
|
* Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
|
|
|
|
* fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
|
|
|
|
* based on inputs (sockets, cores, threads), it is still better to give
|
2014-10-21 17:00:45 +02:00
|
|
|
* users a warning.
|
|
|
|
*
|
|
|
|
* NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
|
|
|
|
* cs->nr_threads hasn't been populated yet and the check would be incorrect.
|
|
|
|
*/
|
2018-10-17 10:26:28 +02:00
|
|
|
if (IS_AMD_CPU(env) &&
|
|
|
|
!(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
|
|
|
|
cs->nr_threads > 1 && !ht_warned) {
|
|
|
|
warn_report("This family of AMD CPU doesn't support "
|
|
|
|
"hyperthreading(%d)",
|
|
|
|
cs->nr_threads);
|
|
|
|
error_printf("Please configure -smp options properly"
|
|
|
|
" or try enabling topoext feature.\n");
|
|
|
|
ht_warned = true;
|
2014-10-21 17:00:45 +02:00
|
|
|
}
|
|
|
|
|
2021-03-22 14:27:54 +01:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2013-04-05 16:36:54 +02:00
|
|
|
x86_cpu_apic_realize(cpu, &local_err);
|
|
|
|
if (local_err != NULL) {
|
|
|
|
goto out;
|
|
|
|
}
|
2021-03-22 14:27:54 +01:00
|
|
|
#endif /* !CONFIG_USER_ONLY */
|
2013-07-27 02:53:25 +02:00
|
|
|
cpu_reset(cs);
|
2013-01-16 03:41:47 +01:00
|
|
|
|
2013-04-05 16:36:53 +02:00
|
|
|
xcc->parent_realize(dev, &local_err);
|
2015-03-31 14:11:09 +02:00
|
|
|
|
2013-04-05 16:36:53 +02:00
|
|
|
out:
|
|
|
|
if (local_err != NULL) {
|
|
|
|
error_propagate(errp, local_err);
|
|
|
|
return;
|
|
|
|
}
|
2012-05-09 23:15:32 +02:00
|
|
|
}
|
|
|
|
|
2020-05-05 17:29:24 +02:00
|
|
|
static void x86_cpu_unrealizefn(DeviceState *dev)
|
2016-06-24 16:01:02 +02:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(dev);
|
2016-10-20 13:26:04 +02:00
|
|
|
X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
|
2016-06-24 16:01:02 +02:00
|
|
|
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
cpu_remove_sync(CPU(dev));
|
|
|
|
qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (cpu->apic_state) {
|
|
|
|
object_unparent(OBJECT(cpu->apic_state));
|
|
|
|
cpu->apic_state = NULL;
|
|
|
|
}
|
2016-10-20 13:26:04 +02:00
|
|
|
|
2020-05-05 17:29:24 +02:00
|
|
|
xcc->parent_unrealize(dev);
|
2016-06-24 16:01:02 +02:00
|
|
|
}
|
|
|
|
|
2015-03-23 21:29:32 +01:00
|
|
|
typedef struct BitProperty {
|
2017-03-27 16:48:14 +02:00
|
|
|
FeatureWord w;
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t mask;
|
2015-03-23 21:29:32 +01:00
|
|
|
} BitProperty;
|
|
|
|
|
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
|
|
|
|
void *opaque, Error **errp)
|
2015-03-23 21:29:32 +01:00
|
|
|
{
|
2017-03-27 16:48:14 +02:00
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
2015-03-23 21:29:32 +01:00
|
|
|
BitProperty *fp = opaque;
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t f = cpu->env.features[fp->w];
|
2017-03-27 16:48:14 +02:00
|
|
|
bool value = (f & fp->mask) == fp->mask;
|
2016-01-29 14:48:54 +01:00
|
|
|
visit_type_bool(v, name, &value, errp);
|
2015-03-23 21:29:32 +01:00
|
|
|
}
|
|
|
|
|
qom: Swap 'name' next to visitor in ObjectPropertyAccessor
Similar to the previous patch, it's nice to have all functions
in the tree that involve a visitor and a name for conversion to
or from QAPI to consistently stick the 'name' parameter next
to the Visitor parameter.
Done by manually changing include/qom/object.h and qom/object.c,
then running this Coccinelle script and touching up the fallout
(Coccinelle insisted on adding some trailing whitespace).
@ rule1 @
identifier fn;
typedef Object, Visitor, Error;
identifier obj, v, opaque, name, errp;
@@
void fn
- (Object *obj, Visitor *v, void *opaque, const char *name,
+ (Object *obj, Visitor *v, const char *name, void *opaque,
Error **errp) { ... }
@@
identifier rule1.fn;
expression obj, v, opaque, name, errp;
@@
fn(obj, v,
- opaque, name,
+ name, opaque,
errp)
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <1454075341-13658-20-git-send-email-eblake@redhat.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
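For illustration (not taken from the patch), a hypothetical accessor foo_get()
is rewritten by rule1 above as follows:
    /* foo_get() is a made-up accessor name */
    /* before */
    static void foo_get(Object *obj, Visitor *v, void *opaque,
                        const char *name, Error **errp);
    /* after: 'name' moves next to the visitor */
    static void foo_get(Object *obj, Visitor *v, const char *name,
                        void *opaque, Error **errp);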
2016-01-29 14:48:55 +01:00
|
|
|
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
|
|
|
|
void *opaque, Error **errp)
|
2015-03-23 21:29:32 +01:00
|
|
|
{
|
|
|
|
DeviceState *dev = DEVICE(obj);
|
2017-03-27 16:48:14 +02:00
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
2015-03-23 21:29:32 +01:00
|
|
|
BitProperty *fp = opaque;
|
|
|
|
bool value;
|
|
|
|
|
|
|
|
if (dev->realized) {
|
|
|
|
qdev_prop_set_after_realize(dev, name, errp);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
error: Eliminate error_propagate() with Coccinelle, part 1
When all we do with an Error we receive into a local variable is
propagating to somewhere else, we can just as well receive it there
right away. Convert
if (!foo(..., &err)) {
...
error_propagate(errp, err);
...
return ...
}
to
if (!foo(..., errp)) {
...
...
return ...
}
where nothing else needs @err. Coccinelle script:
@rule1 forall@
identifier fun, err, errp, lbl;
expression list args, args2;
binary operator op;
constant c1, c2;
symbol false;
@@
if (
(
- fun(args, &err, args2)
+ fun(args, errp, args2)
|
- !fun(args, &err, args2)
+ !fun(args, errp, args2)
|
- fun(args, &err, args2) op c1
+ fun(args, errp, args2) op c1
)
)
{
... when != err
when != lbl:
when strict
- error_propagate(errp, err);
... when != err
(
return;
|
return c2;
|
return false;
)
}
@rule2 forall@
identifier fun, err, errp, lbl;
expression list args, args2;
expression var;
binary operator op;
constant c1, c2;
symbol false;
@@
- var = fun(args, &err, args2);
+ var = fun(args, errp, args2);
... when != err
if (
(
var
|
!var
|
var op c1
)
)
{
... when != err
when != lbl:
when strict
- error_propagate(errp, err);
... when != err
(
return;
|
return c2;
|
return false;
|
return var;
)
}
@depends on rule1 || rule2@
identifier err;
@@
- Error *err = NULL;
... when != err
Not exactly elegant, I'm afraid.
The "when != lbl:" is necessary to avoid transforming
if (fun(args, &err)) {
goto out
}
...
out:
error_propagate(errp, err);
even though other paths to label out still need the error_propagate().
For an actual example, see sclp_realize().
Without the "when strict", Coccinelle transforms vfio_msix_setup(),
incorrectly. I don't know what exactly "when strict" does, only that
it helps here.
The match of return is narrower than what I want, but I can't figure
out how to express "return where the operand doesn't use @err". For
an example where it's too narrow, see vfio_intx_enable().
Silently fails to convert hw/arm/armsse.c, because Coccinelle gets
confused by ARMSSE being used both as typedef and function-like macro
there. Converted manually.
Line breaks tidied up manually. One nested declaration of @local_err
deleted manually. Preexisting unwanted blank line dropped in
hw/riscv/sifive_e.c.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-35-armbru@redhat.com>
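A hand-written sketch of the pattern this script converts, with foo() standing
in for any function that returns false and sets an error on failure:
    /* foo() is hypothetical */
    /* before */
    Error *err = NULL;
    if (!foo(arg, &err)) {
        error_propagate(errp, err);
        return false;
    }
    /* after */
    if (!foo(arg, errp)) {
        return false;
    }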
2020-07-07 18:06:02 +02:00
|
|
|
if (!visit_type_bool(v, name, &value, errp)) {
|
2015-03-23 21:29:32 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (value) {
|
2017-03-27 16:48:14 +02:00
|
|
|
cpu->env.features[fp->w] |= fp->mask;
|
2015-03-23 21:29:32 +01:00
|
|
|
} else {
|
2017-03-27 16:48:14 +02:00
|
|
|
cpu->env.features[fp->w] &= ~fp->mask;
|
2015-03-23 21:29:32 +01:00
|
|
|
}
|
2017-03-27 16:48:15 +02:00
|
|
|
cpu->env.user_features[fp->w] |= fp->mask;
|
2015-03-23 21:29:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Register a boolean property to get/set a single bit in a uint32_t field.
|
|
|
|
*
|
|
|
|
* The same property name can be registered multiple times to make it affect
|
|
|
|
* multiple bits in the same FeatureWord. In that case, the getter will return
|
|
|
|
* true only if all bits are set.
|
|
|
|
*/
|
2020-11-11 19:38:15 +01:00
|
|
|
static void x86_cpu_register_bit_prop(X86CPUClass *xcc,
|
2015-03-23 21:29:32 +01:00
|
|
|
const char *prop_name,
|
2017-03-27 16:48:14 +02:00
|
|
|
FeatureWord w,
|
2015-03-23 21:29:32 +01:00
|
|
|
int bitnr)
|
|
|
|
{
|
2020-11-11 19:38:15 +01:00
|
|
|
ObjectClass *oc = OBJECT_CLASS(xcc);
|
2015-03-23 21:29:32 +01:00
|
|
|
BitProperty *fp;
|
|
|
|
ObjectProperty *op;
|
2019-07-01 17:38:54 +02:00
|
|
|
uint64_t mask = (1ULL << bitnr);
|
2015-03-23 21:29:32 +01:00
|
|
|
|
2020-11-11 19:38:15 +01:00
|
|
|
op = object_class_property_find(oc, prop_name);
|
2015-03-23 21:29:32 +01:00
|
|
|
if (op) {
|
|
|
|
fp = op->opaque;
|
2017-03-27 16:48:14 +02:00
|
|
|
assert(fp->w == w);
|
2015-03-23 21:29:32 +01:00
|
|
|
fp->mask |= mask;
|
|
|
|
} else {
|
|
|
|
fp = g_new0(BitProperty, 1);
|
2017-03-27 16:48:14 +02:00
|
|
|
fp->w = w;
|
2015-03-23 21:29:32 +01:00
|
|
|
fp->mask = mask;
|
2020-11-11 19:38:15 +01:00
|
|
|
object_class_property_add(oc, prop_name, "bool",
|
|
|
|
x86_cpu_get_bit_prop,
|
|
|
|
x86_cpu_set_bit_prop,
|
|
|
|
NULL, fp);
|
2015-03-23 21:29:32 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-11 19:38:15 +01:00
|
|
|
static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
|
2015-03-23 21:29:32 +01:00
|
|
|
FeatureWord w,
|
|
|
|
int bitnr)
|
|
|
|
{
|
|
|
|
FeatureWordInfo *fi = &feature_word_info[w];
|
2016-09-30 20:49:41 +02:00
|
|
|
const char *name = fi->feat_names[bitnr];
|
2015-03-23 21:29:32 +01:00
|
|
|
|
2016-09-30 20:49:41 +02:00
|
|
|
if (!name) {
|
2015-03-23 21:29:32 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-09-30 20:49:40 +02:00
|
|
|
/* Property names should use "-" instead of "_".
|
|
|
|
* Old names containing underscores are registered as aliases
|
|
|
|
* using object_property_add_alias()
|
|
|
|
*/
|
2016-09-30 20:49:41 +02:00
|
|
|
assert(!strchr(name, '_'));
|
|
|
|
/* aliases don't use "|" delimiters anymore, they are registered
|
|
|
|
* manually using object_property_add_alias() */
|
|
|
|
assert(!strchr(name, '|'));
|
2020-11-11 19:38:15 +01:00
|
|
|
x86_cpu_register_bit_prop(xcc, name, w, bitnr);
|
2015-03-23 21:29:32 +01:00
|
|
|
}
|
|
|
|
|
i386: run accel_cpu_instance_init as post_init
This fixes host and max cpu initialization, by running the accel cpu
initialization only after all instance init functions are called for all
X86 cpu subclasses.
The bug this is fixing is related to the "max" and "host" i386 cpu
subclasses, which set cpu->max_features, which is then used at cpu
realization time.
In order to properly split the accel-specific max features code that
needs to be executed at cpu instance initialization time,
we cannot call the accel cpu initialization at the end of the x86 base
class initialization, or we will have no way to specialize
"max features" cpu behavior, overriding the "max" cpu class defaults,
and checking for the "max features" flag itself.
This patch moves the accel-specific cpu instance initialization to after
all x86 cpu instance code has been executed, including subclasses,
so that proper initialization of cpu "host" and "max" can be restored.
Fixes: f5cc5a5c ("i386: split cpu accelerators from cpu.c,"...)
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210603123001.17843-3-cfontana@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-06-03 14:30:01 +02:00
|
|
|
static void x86_cpu_post_initfn(Object *obj)
|
|
|
|
{
|
|
|
|
accel_cpu_instance_init(CPU(obj));
|
|
|
|
}
|
|
|
|
|
2012-04-03 00:00:17 +02:00
|
|
|
static void x86_cpu_initfn(Object *obj)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(obj);
|
2014-02-10 11:21:30 +01:00
|
|
|
X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
|
2012-04-03 00:00:17 +02:00
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
2019-06-12 10:40:56 +02:00
|
|
|
env->nr_dies = 1;
|
2019-03-28 22:26:22 +01:00
|
|
|
cpu_set_cpustate_pointers(cpu);
|
2012-04-17 12:10:29 +02:00
|
|
|
|
2013-05-06 18:20:07 +02:00
|
|
|
object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
|
|
|
|
x86_cpu_get_feature_words,
|
qom: Drop parameter @errp of object_property_add() & friends
The only way object_property_add() can fail is when a property with
the same name already exists. Since our property names are all
hardcoded, failure is a programming error, and the appropriate way to
handle it is passing &error_abort.
Same for its variants, except for object_property_add_child(), which
additionally fails when the child already has a parent. Parentage is
also under program control, so this is a programming error, too.
We have a bit over 500 callers. Almost half of them pass
&error_abort, slightly fewer ignore errors, one test case handles
errors, and the remaining few callers pass them to their own callers.
The previous few commits demonstrated once again that ignoring
programming errors is a bad idea.
Of the few ones that pass on errors, several violate the Error API.
The Error ** argument must be NULL, &error_abort, &error_fatal, or a
pointer to a variable containing NULL. Passing an argument of the
latter kind twice without clearing it in between is wrong: if the
first call sets an error, it no longer points to NULL for the second
call. ich9_pm_add_properties(), sparc32_ledma_realize(),
sparc32_dma_realize(), xilinx_axidma_realize(), xilinx_enet_realize()
are wrong that way.
When the one appropriate choice of argument is &error_abort, letting
users pick the argument is a bad idea.
Drop parameter @errp and assert the preconditions instead.
There's one exception to "duplicate property name is a programming
error": the way object_property_add() implements the magic (and
undocumented) "automatic arrayification". Don't drop @errp there.
Instead, rename object_property_add() to object_property_try_add(),
and add the obvious wrapper object_property_add().
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200505152926.18877-15-armbru@redhat.com>
[Two semantic rebase conflicts resolved]
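A sketch of a caller after the change (getter/setter names are hypothetical;
the x86 feature-word properties below follow the same pattern):
    /* my_prop_get/my_prop_set are made-up names for illustration.
     * There is no Error ** argument any more; a duplicate name now aborts. */
    object_property_add(obj, "my-prop", "bool",
                        my_prop_get, my_prop_set, NULL, opaque);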
2020-05-05 17:29:22 +02:00
|
|
|
NULL, NULL, (void *)env->features);
|
2013-05-06 18:20:09 +02:00
|
|
|
object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
|
|
|
|
x86_cpu_get_feature_words,
|
2020-05-05 17:29:22 +02:00
|
|
|
NULL, NULL, (void *)cpu->filtered_features);
|
2017-02-14 07:25:22 +01:00
|
|
|
|
2020-05-05 17:29:22 +02:00
|
|
|
object_property_add_alias(obj, "sse3", obj, "pni");
|
|
|
|
object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq");
|
|
|
|
object_property_add_alias(obj, "sse4-1", obj, "sse4.1");
|
|
|
|
object_property_add_alias(obj, "sse4-2", obj, "sse4.2");
|
|
|
|
object_property_add_alias(obj, "xd", obj, "nx");
|
|
|
|
object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt");
|
|
|
|
object_property_add_alias(obj, "i64", obj, "lm");
|
|
|
|
|
|
|
|
object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl");
|
|
|
|
object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust");
|
|
|
|
object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt");
|
|
|
|
object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm");
|
|
|
|
object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy");
|
|
|
|
object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr");
|
|
|
|
object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core");
|
|
|
|
object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb");
|
|
|
|
object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
|
|
|
|
object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
|
|
|
|
object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf");
|
2020-09-08 16:12:06 +02:00
|
|
|
object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int");
|
2020-05-05 17:29:22 +02:00
|
|
|
object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time");
|
|
|
|
object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi");
|
|
|
|
object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt");
|
|
|
|
object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control");
|
|
|
|
object_property_add_alias(obj, "svm_lock", obj, "svm-lock");
|
|
|
|
object_property_add_alias(obj, "nrip_save", obj, "nrip-save");
|
|
|
|
object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale");
|
|
|
|
object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean");
|
|
|
|
object_property_add_alias(obj, "pause_filter", obj, "pause-filter");
|
|
|
|
object_property_add_alias(obj, "sse4_1", obj, "sse4.1");
|
|
|
|
object_property_add_alias(obj, "sse4_2", obj, "sse4.2");
|
2016-09-30 20:49:38 +02:00
|
|
|
|
2021-09-02 11:35:28 +02:00
|
|
|
object_property_add_alias(obj, "hv-apicv", obj, "hv-avic");
|
2022-02-15 20:52:52 +01:00
|
|
|
cpu->lbr_fmt = ~PERF_CAP_LBR_FMT;
|
|
|
|
object_property_add_alias(obj, "lbr_fmt", obj, "lbr-fmt");
|
2021-09-02 11:35:28 +02:00
|
|
|
|
2019-06-28 02:28:39 +02:00
|
|
|
if (xcc->model) {
|
2020-05-05 12:19:08 +02:00
|
|
|
x86_cpu_load_model(cpu, xcc->model);
|
2017-02-22 19:39:19 +01:00
|
|
|
}
|
2012-04-03 00:00:17 +02:00
|
|
|
}
|
|
|
|
|
2013-04-23 10:29:41 +02:00
|
|
|
static int64_t x86_cpu_get_arch_id(CPUState *cs)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
|
2014-12-19 02:20:10 +01:00
|
|
|
return cpu->apic_id;
|
2013-04-23 10:29:41 +02:00
|
|
|
}
|
|
|
|
|
2021-05-17 12:51:39 +02:00
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
2013-05-28 13:28:38 +02:00
|
|
|
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
|
|
|
|
return cpu->env.cr[0] & CR0_PG_MASK;
|
|
|
|
}
|
2021-05-17 12:51:39 +02:00
|
|
|
#endif /* !CONFIG_USER_ONLY */
|
2013-05-28 13:28:38 +02:00
|
|
|
|
2013-06-21 19:09:18 +02:00
|
|
|
static void x86_cpu_set_pc(CPUState *cs, vaddr value)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
|
|
|
|
cpu->env.eip = value;
|
|
|
|
}
|
|
|
|
|
2018-08-21 15:31:24 +02:00
|
|
|
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
|
2013-08-25 18:53:55 +02:00
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
2018-08-21 15:31:24 +02:00
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
|
|
|
if (interrupt_request & CPU_INTERRUPT_POLL) {
|
|
|
|
return CPU_INTERRUPT_POLL;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (interrupt_request & CPU_INTERRUPT_SIPI) {
|
|
|
|
return CPU_INTERRUPT_SIPI;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (env->hflags2 & HF2_GIF_MASK) {
|
|
|
|
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
|
|
|
|
!(env->hflags & HF_SMM_MASK)) {
|
|
|
|
return CPU_INTERRUPT_SMI;
|
|
|
|
} else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
|
|
|
|
!(env->hflags2 & HF2_NMI_MASK)) {
|
|
|
|
return CPU_INTERRUPT_NMI;
|
|
|
|
} else if (interrupt_request & CPU_INTERRUPT_MCE) {
|
|
|
|
return CPU_INTERRUPT_MCE;
|
|
|
|
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
|
|
|
|
(((env->hflags2 & HF2_VINTR_MASK) &&
|
|
|
|
(env->hflags2 & HF2_HIF_MASK)) ||
|
|
|
|
(!(env->hflags2 & HF2_VINTR_MASK) &&
|
|
|
|
(env->eflags & IF_MASK &&
|
|
|
|
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
|
|
|
|
return CPU_INTERRUPT_HARD;
|
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
2021-08-05 13:08:23 +02:00
|
|
|
} else if (env->hflags2 & HF2_VGIF_MASK) {
|
|
|
|
if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
|
2018-08-21 15:31:24 +02:00
|
|
|
(env->eflags & IF_MASK) &&
|
|
|
|
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
|
2021-08-05 13:08:23 +02:00
|
|
|
return CPU_INTERRUPT_VIRQ;
|
|
|
|
}
|
2018-08-21 15:31:24 +02:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool x86_cpu_has_work(CPUState *cs)
|
|
|
|
{
|
|
|
|
return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
|
2013-08-25 18:53:55 +02:00
|
|
|
}
|
|
|
|
|
2017-09-14 17:10:53 +02:00
|
|
|
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
|
|
|
|
{
|
|
|
|
X86CPU *cpu = X86_CPU(cs);
|
|
|
|
CPUX86State *env = &cpu->env;
|
|
|
|
|
|
|
|
info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
|
|
|
|
: env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
|
|
|
|
: bfd_mach_i386_i8086);
|
2017-09-14 18:50:05 +02:00
|
|
|
|
|
|
|
info->cap_arch = CS_ARCH_X86;
|
|
|
|
info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
|
|
|
|
: env->hflags & HF_CS32_MASK ? CS_MODE_32
|
|
|
|
: CS_MODE_16);
|
2017-11-07 13:19:18 +01:00
|
|
|
info->cap_insn_unit = 1;
|
|
|
|
info->cap_insn_split = 8;
|
2017-09-14 17:10:53 +02:00
|
|
|
}
|
|
|
|
|
2018-01-10 20:50:54 +01:00
|
|
|
void x86_update_hflags(CPUX86State *env)
|
|
|
|
{
|
|
|
|
uint32_t hflags;
|
|
|
|
#define HFLAG_COPY_MASK \
|
|
|
|
~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
|
|
|
|
HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
|
|
|
|
HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
|
|
|
|
HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
|
|
|
|
|
|
|
|
hflags = env->hflags & HFLAG_COPY_MASK;
|
|
|
|
hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
|
|
|
|
hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
|
|
|
|
hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
|
|
|
|
(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
|
|
|
|
hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
|
|
|
|
|
|
|
|
if (env->cr[4] & CR4_OSFXSR_MASK) {
|
|
|
|
hflags |= HF_OSFXSR_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (env->efer & MSR_EFER_LMA) {
|
|
|
|
hflags |= HF_LMA_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
|
|
|
|
hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
|
|
|
|
} else {
|
|
|
|
hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
|
|
|
|
(DESC_B_SHIFT - HF_CS32_SHIFT);
|
|
|
|
hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
|
|
|
|
(DESC_B_SHIFT - HF_SS32_SHIFT);
|
|
|
|
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
|
|
|
|
!(hflags & HF_CS32_MASK)) {
|
|
|
|
hflags |= HF_ADDSEG_MASK;
|
|
|
|
} else {
|
|
|
|
hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
|
|
|
|
env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
env->hflags = hflags;
|
|
|
|
}
|
|
|
|
|
2013-07-26 22:09:36 +02:00
|
|
|
static Property x86_cpu_properties[] = {
|
2016-07-06 08:20:41 +02:00
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
/* apic_id = 0 by default for *-user, see commit 9886e834 */
|
|
|
|
DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
|
2016-07-06 08:20:42 +02:00
|
|
|
DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
|
|
|
|
DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
|
2019-06-12 10:40:58 +02:00
|
|
|
DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
|
2016-07-06 08:20:42 +02:00
|
|
|
DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
|
2016-07-06 08:20:41 +02:00
|
|
|
#else
|
|
|
|
DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
|
2016-07-06 08:20:42 +02:00
|
|
|
DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
|
|
|
|
DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
|
2019-06-12 10:40:58 +02:00
|
|
|
DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
|
2016-07-06 08:20:42 +02:00
|
|
|
DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
|
2016-07-06 08:20:41 +02:00
|
|
|
#endif
|
2017-05-30 18:24:00 +02:00
|
|
|
DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
|
2013-07-26 22:09:36 +02:00
|
|
|
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
|
2022-02-15 20:52:52 +01:00
|
|
|
DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", X86CPU, lbr_fmt, PERF_CAP_LBR_FMT),
|
2019-05-17 16:19:16 +02:00
|
|
|
|
2019-06-18 13:07:06 +02:00
|
|
|
DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
|
2020-05-15 13:48:47 +02:00
|
|
|
HYPERV_SPINLOCK_NEVER_NOTIFY),
|
2019-05-17 16:19:16 +02:00
|
|
|
DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_RELAXED, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_VAPIC, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_TIME, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_CRASH, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_RESET, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_VPINDEX, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_RUNTIME, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_SYNIC, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_STIMER, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_FREQUENCIES, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_REENLIGHTENMENT, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_TLBFLUSH, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_EVMCS, 0),
|
|
|
|
DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_IPI, 0),
|
2019-05-17 16:19:24 +02:00
|
|
|
DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_STIMER_DIRECT, 0),
|
2021-09-02 11:35:28 +02:00
|
|
|
DEFINE_PROP_BIT64("hv-avic", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_AVIC, 0),
|
2019-10-18 18:39:08 +02:00
|
|
|
DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
|
|
|
|
hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
|
2022-02-16 11:24:59 +01:00
|
|
|
DEFINE_PROP_BIT64("hv-syndbg", X86CPU, hyperv_features,
|
|
|
|
HYPERV_FEAT_SYNDBG, 0),
|
2019-05-17 16:19:20 +02:00
|
|
|
DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
|
2021-09-02 11:35:26 +02:00
|
|
|
DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false),
|
2019-05-17 16:19:16 +02:00
|
|
|
|
2021-09-02 11:35:29 +02:00
|
|
|
/* Identify as WS2008R2 by default */
|
|
|
|
DEFINE_PROP_UINT32("hv-version-id-build", X86CPU, hyperv_ver_id_build,
|
2021-09-02 11:35:30 +02:00
|
|
|
0x3839),
|
2021-09-02 11:35:29 +02:00
|
|
|
DEFINE_PROP_UINT16("hv-version-id-major", X86CPU, hyperv_ver_id_major,
|
2021-09-02 11:35:30 +02:00
|
|
|
0x000A),
|
2021-09-02 11:35:29 +02:00
|
|
|
DEFINE_PROP_UINT16("hv-version-id-minor", X86CPU, hyperv_ver_id_minor,
|
2021-09-02 11:35:30 +02:00
|
|
|
0x0000),
|
2021-09-02 11:35:29 +02:00
|
|
|
DEFINE_PROP_UINT32("hv-version-id-spack", X86CPU, hyperv_ver_id_sp, 0),
|
|
|
|
DEFINE_PROP_UINT8("hv-version-id-sbranch", X86CPU, hyperv_ver_id_sb, 0),
|
|
|
|
DEFINE_PROP_UINT32("hv-version-id-snumber", X86CPU, hyperv_ver_id_sn, 0),
|
|
|
|
|
2015-08-26 18:25:44 +02:00
|
|
|
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
|
2013-06-04 15:13:14 +02:00
|
|
|
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
|
2019-06-28 02:28:37 +02:00
|
|
|
DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
|
2014-06-02 19:28:50 +02:00
|
|
|
DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
|
2016-07-08 17:01:36 +02:00
|
|
|
DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
|
2016-07-11 21:28:46 +02:00
|
|
|
DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
|
2018-12-11 20:25:27 +01:00
|
|
|
DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
|
2016-07-08 17:01:38 +02:00
|
|
|
DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
|
2019-07-25 08:14:16 +02:00
|
|
|
DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
|
|
|
|
UINT32_MAX),
|
2016-09-21 18:30:12 +02:00
|
|
|
DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
|
|
|
|
DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
|
|
|
|
DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
|
|
|
|
DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
|
|
|
|
DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
|
|
|
|
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
|
2020-01-20 19:21:43 +01:00
|
|
|
DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
|
2016-09-21 18:30:12 +02:00
|
|
|
DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
|
2020-11-19 11:32:17 +01:00
|
|
|
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
|
2016-05-12 19:24:26 +02:00
|
|
|
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
|
2021-07-08 02:36:23 +02:00
|
|
|
DEFINE_PROP_BOOL("x-vendor-cpuid-only", X86CPU, vendor_cpuid_only, true),
|
2016-06-22 08:56:21 +02:00
|
|
|
DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
|
target-i386: present virtual L3 cache info for vcpus
Some software algorithms are based on the hardware's cache info. For example,
in the x86 Linux kernel, when cpu1 wants to wake up a task on cpu2, cpu1 will
trigger a resched IPI and tell cpu2 to do the wakeup if they don't share a
low-level cache; conversely, cpu1 will access cpu2's runqueue directly if they
share the LLC. The relevant linux-kernel code is below:
static void ttwu_queue(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
......
if (... && !cpus_share_cache(smp_processor_id(), cpu)) {
......
ttwu_queue_remote(p, cpu); /* will trigger RES IPI */
return;
}
......
ttwu_do_activate(rq, p, 0); /* access target's rq directly */
......
}
In real hardware, the CPUs on the same socket share the L3 cache, so one won't
trigger resched IPIs when waking up a task on another. But QEMU doesn't present
virtual L3 cache info to the VM, so the Linux guest will trigger lots of RES IPIs
under some workloads even if the virtual CPUs belong to the same virtual socket.
For KVM, this means lots of vmexits caused by the guest sending IPIs.
The workload is SAP HANA's test suite; we run it for one round (about 40 minutes)
and observe the number of RES IPIs triggered in the (SUSE 11 SP3) guest during
that period:
No-L3 With-L3(applied this patch)
cpu0: 363890 44582
cpu1: 373405 43109
cpu2: 340783 43797
cpu3: 333854 43409
cpu4: 327170 40038
cpu5: 325491 39922
cpu6: 319129 42391
cpu7: 306480 41035
cpu8: 161139 32188
cpu9: 164649 31024
cpu10: 149823 30398
cpu11: 149823 32455
cpu12: 164830 35143
cpu13: 172269 35805
cpu14: 179979 33898
cpu15: 194505 32754
avg: 268963.6 40129.8
The VM's topology is "1*socket 8*cores 2*threads".
After presenting virtual L3 cache info to the VM, the number of RES IPIs in the
guest drops by 85%.
For KVM, vCPUs sending IPIs cause vmexits, which are expensive, so this can cause
severe performance degradation. We also tested overall system performance with the
vCPUs actually running on separate physical sockets. With the L3 cache, performance
improves by 7.2%~33.1% (avg: 15.7%).
Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-09-07 07:21:13 +02:00
|
|
|
DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
|
2017-02-23 14:34:41 +01:00
|
|
|
DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
|
|
|
|
false),
|
2021-09-02 11:35:25 +02:00
|
|
|
DEFINE_PROP_BOOL("kvm-pv-enforce-cpuid", X86CPU, kvm_pv_enforce_cpuid,
|
|
|
|
false),
|
2017-01-20 15:11:36 +01:00
|
|
|
DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
|
2017-05-09 15:27:36 +02:00
|
|
|
DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
|
2018-07-24 13:59:21 +02:00
|
|
|
DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
|
|
|
|
true),
|
2018-05-14 18:41:51 +02:00
|
|
|
/*
|
2018-05-24 17:43:30 +02:00
|
|
|
* legacy_cache defaults to true unless the CPU model provides its
|
|
|
|
* own cache information (see x86_cpu_load_def()).
|
2018-05-14 18:41:51 +02:00
|
|
|
*/
|
2018-05-24 17:43:30 +02:00
|
|
|
DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
|
2017-09-11 17:20:27 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* From "Requirements for Implementing the Microsoft
|
|
|
|
* Hypervisor Interface":
|
|
|
|
* https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
|
|
|
|
*
|
|
|
|
* "Starting with Windows Server 2012 and Windows 8, if
|
|
|
|
* CPUID.40000005.EAX contains a value of -1, Windows assumes that
|
|
|
|
* the hypervisor imposes no specific limit to the number of VPs.
|
|
|
|
* In this case, Windows Server 2012 guest VMs may use more than
|
|
|
|
* 64 VPs, up to the maximum supported number of processors applicable
|
|
|
|
* to the specific Windows version being used."
|
|
|
|
*/
|
|
|
|
DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
|
2018-09-21 10:22:10 +02:00
|
|
|
DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
|
|
|
|
false),
|
2019-01-30 00:52:59 +01:00
|
|
|
DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
|
|
|
|
true),
|
2013-07-26 22:09:36 +02:00
|
|
|
DEFINE_PROP_END_OF_LIST()
|
|
|
|
};
|
|
|
|
|
2021-05-17 12:51:31 +02:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
#include "hw/core/sysemu-cpu-ops.h"
|
|
|
|
|
|
|
|
static const struct SysemuCPUOps i386_sysemu_ops = {
|
2021-05-17 12:51:38 +02:00
|
|
|
.get_memory_mapping = x86_cpu_get_memory_mapping,
|
2021-05-17 12:51:39 +02:00
|
|
|
.get_paging_enabled = x86_cpu_get_paging_enabled,
|
2021-05-17 12:51:37 +02:00
|
|
|
.get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
|
2021-05-17 12:51:36 +02:00
|
|
|
.asidx_from_attrs = x86_asidx_from_attrs,
|
2021-05-17 12:51:34 +02:00
|
|
|
.get_crash_info = x86_cpu_get_crash_info,
|
2021-05-17 12:51:35 +02:00
|
|
|
.write_elf32_note = x86_cpu_write_elf32_note,
|
|
|
|
.write_elf64_note = x86_cpu_write_elf64_note,
|
|
|
|
.write_elf32_qemunote = x86_cpu_write_elf32_qemunote,
|
|
|
|
.write_elf64_qemunote = x86_cpu_write_elf64_qemunote,
|
2021-05-17 12:51:32 +02:00
|
|
|
.legacy_vmsd = &vmstate_x86_cpu,
|
2021-05-17 12:51:31 +02:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2012-04-02 23:20:08 +02:00
|
|
|
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
|
|
|
|
{
|
|
|
|
X86CPUClass *xcc = X86_CPU_CLASS(oc);
|
|
|
|
CPUClass *cc = CPU_CLASS(oc);
|
2013-01-16 03:41:47 +01:00
|
|
|
DeviceClass *dc = DEVICE_CLASS(oc);
|
2020-11-11 19:38:15 +01:00
|
|
|
FeatureWord w;
|
2013-01-16 03:41:47 +01:00
|
|
|
|
2018-01-14 03:04:12 +01:00
|
|
|
device_class_set_parent_realize(dc, x86_cpu_realizefn,
|
|
|
|
&xcc->parent_realize);
|
|
|
|
device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
|
|
|
|
&xcc->parent_unrealize);
|
2020-01-10 16:30:32 +01:00
|
|
|
device_class_set_props(dc, x86_cpu_properties);
|
2012-04-02 23:20:08 +02:00
|
|
|
|
cpu: Use DeviceClass reset instead of a special CPUClass reset
The CPUClass has a 'reset' method. This is a legacy from when
TYPE_CPU used not to inherit from TYPE_DEVICE. We don't need it any
more, as we can simply use the TYPE_DEVICE reset. The 'cpu_reset()'
function is kept as the API which most places use to reset a CPU; it
is now a wrapper which calls device_cold_reset() and then the
tracepoint function.
This change should not cause CPU objects to be reset more often
than they are at the moment, because:
* nobody is directly calling device_cold_reset() or
qdev_reset_all() on CPU objects
* no CPU object is on a qbus, so they will not be reset either
by somebody calling qbus_reset_all()/bus_cold_reset(), or
by the main "reset sysbus and everything in the qbus tree"
reset that most devices are reset by
Note that this does not change the need for each machine or whatever
to use qemu_register_reset() to arrange to call cpu_reset() -- that
is necessary because CPU objects are not on any qbus, so they don't
get reset when the qbus tree rooted at the sysbus bus is reset, and
this isn't being changed here.
All the changes to the files under target/ were made using the
included Coccinelle script, except:
(1) the deletion of the now-inaccurate and not terribly useful
"CPUClass::reset" comments was done with a perl one-liner afterwards:
perl -n -i -e '/ CPUClass::reset/ or print' target/*/*.c
(2) this bit of the s390 change was done by hand, because the
Coccinelle script is not sophisticated enough to handle the
parent_reset call being inside another function:
| @@ -96,8 +96,9 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type)
| S390CPU *cpu = S390_CPU(s);
| S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
| CPUS390XState *env = &cpu->env;
|+ DeviceState *dev = DEVICE(s);
|
|- scc->parent_reset(s);
|+ scc->parent_reset(dev);
| cpu->env.sigp_order = 0;
| s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20200303100511.5498-1-peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
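Roughly, the wrapper described above amounts to the following sketch (the
exact tracepoint call is an assumption):
    void cpu_reset(CPUState *cpu)
    {
        device_cold_reset(DEVICE(cpu));
        /* tracepoint name assumed for illustration */
        trace_cpu_reset(cpu->cpu_index);
    }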
2020-03-03 11:05:11 +01:00
|
|
|
device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
|
2013-06-16 07:49:48 +02:00
|
|
|
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
|
2013-02-02 13:38:08 +01:00
|
|
|
|
2014-02-10 22:02:44 +01:00
|
|
|
cc->class_by_name = x86_cpu_class_by_name;
|
2014-03-03 23:19:19 +01:00
|
|
|
cc->parse_features = x86_cpu_parse_featurestr;
|
2013-08-25 18:53:55 +02:00
|
|
|
cc->has_work = x86_cpu_has_work;
|
2013-05-27 01:33:50 +02:00
|
|
|
cc->dump_state = x86_cpu_dump_state;
|
2013-06-21 19:09:18 +02:00
|
|
|
cc->set_pc = x86_cpu_set_pc;
|
2013-06-29 04:18:45 +02:00
|
|
|
cc->gdb_read_register = x86_cpu_gdb_read_register;
|
|
|
|
cc->gdb_write_register = x86_cpu_gdb_write_register;
|
2013-05-28 13:28:38 +02:00
|
|
|
cc->get_arch_id = x86_cpu_get_arch_id;
|
2020-12-12 16:55:14 +01:00
|
|
|
|
2019-04-02 10:39:50 +02:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2021-05-17 12:51:31 +02:00
|
|
|
cc->sysemu_ops = &i386_sysemu_ops;
|
2020-12-12 16:55:14 +01:00
|
|
|
#endif /* !CONFIG_USER_ONLY */
|
|
|
|
|
2016-12-28 17:34:02 +01:00
|
|
|
cc->gdb_arch_name = x86_gdb_arch_name;
|
|
|
|
#ifdef TARGET_X86_64
|
2017-06-01 11:33:15 +02:00
|
|
|
cc->gdb_core_xml_file = "i386-64bit.xml";
|
2019-01-24 05:04:57 +01:00
|
|
|
cc->gdb_num_core_regs = 66;
|
2016-12-28 17:34:02 +01:00
|
|
|
#else
|
2017-06-01 11:33:15 +02:00
|
|
|
cc->gdb_core_xml_file = "i386-32bit.xml";
|
2019-01-24 05:04:57 +01:00
|
|
|
cc->gdb_num_core_regs = 50;
|
2017-10-26 15:58:14 +02:00
|
|
|
#endif
|
2017-09-14 17:10:53 +02:00
|
|
|
cc->disas_set_info = x86_disas_set_info;
|
qdev: Protect device-list-properties against broken devices
Several devices don't survive object_unref(object_new(T)): they crash
or hang during cleanup, or they leave dangling pointers behind.
This breaks at least device-list-properties, because
qmp_device_list_properties() needs to create a device to find its
properties. Broken in commit f4eb32b "qmp: show QOM properties in
device-list-properties", v2.1. Example reproducer:
$ qemu-system-aarch64 -nodefaults -display none -machine none -S -qmp stdio
{"QMP": {"version": {"qemu": {"micro": 50, "minor": 4, "major": 2}, "package": ""}, "capabilities": []}}
{ "execute": "qmp_capabilities" }
{"return": {}}
{ "execute": "device-list-properties", "arguments": { "typename": "pxa2xx-pcmcia" } }
qemu-system-aarch64: /home/armbru/work/qemu/memory.c:1307: memory_region_finalize: Assertion `((&mr->subregions)->tqh_first == ((void *)0))' failed.
Aborted (core dumped)
[Exit 134 (SIGABRT)]
Unfortunately, I can't fix the problems in these devices right now.
Instead, add DeviceClass member cannot_destroy_with_object_finalize_yet
to mark them:
* Hang during cleanup (didn't debug, so I can't say why):
"realview_pci", "versatile_pci".
* Dangling pointer in cpus: most CPUs, plus "allwinner-a10", "digic",
"fsl,imx25", "fsl,imx31", "xlnx,zynqmp", because they create such
CPUs
* Assert kvm_enabled(): "host-x86_64-cpu", host-i386-cpu",
"host-powerpc64-cpu", "host-embedded-powerpc-cpu",
"host-powerpc-cpu" (the powerpc ones can't currently reach the
assertion, because the CPUs are only registered when KVM is enabled,
but the assertion is arguably in the wrong place all the same)
Make qmp_device_list_properties() fail cleanly when the device is so
marked. This improves device-list-properties from "crashes, hangs or
leaves dangling pointers behind" to "fails". Not a complete fix, just
a better-than-nothing work-around. In the above reproducer,
device-list-properties now fails with "Can't list properties of device
'pxa2xx-pcmcia'".
This also protects -device FOO,help, which uses the same machinery
since commit ef52358 "qdev-monitor: include QOM properties in -device
FOO, help output", v2.2. Example reproducer:
$ qemu-system-aarch64 -machine none -device pxa2xx-pcmcia,help
Before:
qemu-system-aarch64: .../memory.c:1307: memory_region_finalize: Assertion `((&mr->subregions)->tqh_first == ((void *)0))' failed.
After:
Can't list properties of device 'pxa2xx-pcmcia'
Cc: "Andreas Färber" <afaerber@suse.de>
Cc: "Edgar E. Iglesias" <edgar.iglesias@gmail.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Anthony Green <green@moxielogic.com>
Cc: Aurelien Jarno <aurelien@aurel32.net>
Cc: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
Cc: Blue Swirl <blauwirbel@gmail.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Jia Liu <proljc@gmail.com>
Cc: Leon Alrae <leon.alrae@imgtec.com>
Cc: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Walle <michael@walle.cc>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: qemu-ppc@nongnu.org
Cc: qemu-stable@nongnu.org
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <1443689999-12182-10-git-send-email-armbru@redhat.com>
2015-10-01 10:59:58 +02:00
|
|
|
|
2017-05-03 22:35:44 +02:00
|
|
|
dc->user_creatable = true;
|
2020-09-22 00:10:34 +02:00
|
|
|
|
|
|
|
object_class_property_add(oc, "family", "int",
|
|
|
|
x86_cpuid_version_get_family,
|
|
|
|
x86_cpuid_version_set_family, NULL, NULL);
|
|
|
|
object_class_property_add(oc, "model", "int",
|
|
|
|
x86_cpuid_version_get_model,
|
|
|
|
x86_cpuid_version_set_model, NULL, NULL);
|
|
|
|
object_class_property_add(oc, "stepping", "int",
|
|
|
|
x86_cpuid_version_get_stepping,
|
|
|
|
x86_cpuid_version_set_stepping, NULL, NULL);
|
|
|
|
object_class_property_add_str(oc, "vendor",
|
|
|
|
x86_cpuid_get_vendor,
|
|
|
|
x86_cpuid_set_vendor);
|
|
|
|
object_class_property_add_str(oc, "model-id",
|
|
|
|
x86_cpuid_get_model_id,
|
|
|
|
x86_cpuid_set_model_id);
|
|
|
|
object_class_property_add(oc, "tsc-frequency", "int",
|
|
|
|
x86_cpuid_get_tsc_freq,
|
|
|
|
x86_cpuid_set_tsc_freq, NULL, NULL);
|
|
|
|
/*
|
|
|
|
* The "unavailable-features" property has the same semantics as
|
|
|
|
* CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
|
|
|
|
* QMP command: they list the features that would have prevented the
|
|
|
|
* CPU from running if the "enforce" flag was set.
|
|
|
|
*/
|
|
|
|
object_class_property_add(oc, "unavailable-features", "strList",
|
|
|
|
x86_cpu_get_unavailable_features,
|
|
|
|
NULL, NULL, NULL);
|
|
|
|
|
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
|
|
|
object_class_property_add(oc, "crash-information", "GuestPanicInformation",
|
|
|
|
x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
|
|
|
|
#endif
|
|
|
|
|
2020-11-11 19:38:15 +01:00
|
|
|
for (w = 0; w < FEATURE_WORDS; w++) {
|
|
|
|
int bitnr;
|
|
|
|
for (bitnr = 0; bitnr < 64; bitnr++) {
|
|
|
|
x86_cpu_register_feature_bit_props(xcc, w, bitnr);
|
|
|
|
}
|
|
|
|
}
|
2012-04-02 23:20:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const TypeInfo x86_cpu_type_info = {
|
|
|
|
.name = TYPE_X86_CPU,
|
|
|
|
.parent = TYPE_CPU,
|
|
|
|
.instance_size = sizeof(X86CPU),
|
2012-04-03 00:00:17 +02:00
|
|
|
.instance_init = x86_cpu_initfn,
|
2021-06-03 14:30:01 +02:00
|
|
|
.instance_post_init = x86_cpu_post_initfn,
|
|
|
|
|
2014-02-10 11:21:30 +01:00
|
|
|
.abstract = true,
|
2012-04-02 23:20:08 +02:00
|
|
|
.class_size = sizeof(X86CPUClass),
|
|
|
|
.class_init = x86_cpu_common_class_init,
|
|
|
|
};
|
|
|
|
|
i386: Define static "base" CPU model
The query-cpu-model-expand QMP command needs at least one static
model, to allow the "static" expansion mode to be implemented.
Instead of defining static versions of every CPU model, define a
"base" CPU model that has absolutely no feature flag enabled.
Despite having no CPUID data set at all, "-cpu base" is even a
functional CPU:
* It can boot a Slackware Linux 1.01 image with a Linux 0.99.12
kernel[1].
* It is even possible to boot[2] a modern Fedora x86_64 guest by
manually enabling the following CPU features:
-cpu base,+lm,+msr,+pae,+fpu,+cx8,+cmov,+sse,+sse2,+fxsr
[1] http://www.qemu-advent-calendar.org/2014/#day-1
[2] This is what can be seen in the guest:
[root@localhost ~]# cat /proc/cpuinfo
processor : 0
vendor_id : unknown
cpu family : 0
model : 0
model name : 00/00
stepping : 0
physical id : 0
siblings : 1
core id : 0
cpu cores : 1
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 1
wp : yes
flags : fpu msr pae cx8 cmov fxsr sse sse2 lm nopl
bugs :
bogomips : 5832.70
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
[root@localhost ~]# x86info -v -a
x86info v1.30. Dave Jones 2001-2011
Feedback to <davej@redhat.com>.
No TSC, MHz calculation cannot be performed.
Unknown vendor (0)
MP Table:
Family: 0 Model: 0 Stepping: 0
CPU Model (x86info's best guess):
eax in: 0x00000000, eax = 00000001 ebx = 00000000 ecx = 00000000 edx = 00000000
eax in: 0x00000001, eax = 00000000 ebx = 00000800 ecx = 00000000 edx = 07008161
eax in: 0x80000000, eax = 80000001 ebx = 00000000 ecx = 00000000 edx = 00000000
eax in: 0x80000001, eax = 00000000 ebx = 00000000 ecx = 00000000 edx = 20000000
Feature flags:
fpu Onboard FPU
msr Model-Specific Registers
pae Physical Address Extensions
cx8 CMPXCHG8 instruction
cmov CMOV instruction
fxsr FXSAVE and FXRSTOR instructions
sse SSE support
sse2 SSE2 support
Long NOPs supported: yes
Address sizes : 0 bits physical, 0 bits virtual
0MHz processor (estimate).
running at an estimated 0MHz
[root@localhost ~]#
Message-Id: <20170222190029.17243-2-ehabkost@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Tested-by: Jiri Denemark <jdenemar@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2017-02-22 20:00:27 +01:00
|
|
|
/* "base" CPU model, used by query-cpu-model-expansion */
|
|
|
|
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
|
|
|
|
{
|
|
|
|
X86CPUClass *xcc = X86_CPU_CLASS(oc);
|
|
|
|
|
|
|
|
xcc->static_model = true;
|
|
|
|
xcc->migration_safe = true;
|
|
|
|
xcc->model_description = "base CPU model type with no features enabled";
|
|
|
|
xcc->ordering = 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const TypeInfo x86_base_cpu_type_info = {
|
|
|
|
.name = X86_CPU_TYPE_NAME("base"),
|
|
|
|
.parent = TYPE_X86_CPU,
|
|
|
|
.class_init = x86_cpu_base_class_init,
|
|
|
|
};
|
|
|
|
|
2012-04-02 23:20:08 +02:00
|
|
|
static void x86_cpu_register_types(void)
|
|
|
|
{
|
2014-02-10 11:21:30 +01:00
|
|
|
int i;
|
|
|
|
|
2012-04-02 23:20:08 +02:00
|
|
|
type_register_static(&x86_cpu_type_info);
|
2014-02-10 11:21:30 +01:00
|
|
|
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
|
2019-06-28 02:28:39 +02:00
|
|
|
x86_register_cpudef_types(&builtin_x86_defs[i]);
|
2014-02-10 11:21:30 +01:00
|
|
|
}
|
2017-02-22 19:39:17 +01:00
|
|
|
type_register_static(&max_x86_cpu_type_info);
|
2017-02-22 20:00:27 +01:00
|
|
|
type_register_static(&x86_base_cpu_type_info);
|
2012-04-02 23:20:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
type_init(x86_cpu_register_types)
|