Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.7-20160614' into staging

ppc patch queue for 2016-06-14

Latest patch queue for ppc.
    * Allow qemu to support a generic architecture 2.07 (POWER8-era)
      compatibility mode.  This is useful for guests which are POWER8
      aware, but don't know about the specific POWER8 variant that
      qemu (and/or KVM) is emulating. (Thomas Huth)
    * Fix a bug where macio wasn't removing DMA mappings (Mark Cave-Ayland)
    * Add a workaround for the Linux guest's miscalculation of the
      maximum memory address (including hotplugged memory), which could
      break when hotplug memory was combined with VFIO.  The previous
      approach was technically correct by spec, but differed from
      PowerVM's behaviour enough to trip a guest kernel bug.  This
      works around the bug, while remaining correct-to-spec. (Bharata Rao)

# gpg: Signature made Tue 14 Jun 2016 06:53:58 BST
# gpg:                using RSA key 0x6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.7-20160614:
  spapr: Ensure all LMBs are represented in ibm,dynamic-memory
  macio: call dma_memory_unmap() at the end of each DMA transfer
  Add PowerPC AT_HWCAP2 definitions
  ppc: Add PowerISA 2.07 compatibility mode
  ppc: Improve PCR bit selection in ppc_set_compat()
  ppc: Provide function to get CPU class of the host CPU
  ppc: Split pcr_mask settings into supported bits and the register mask
  ppc/spapr: Refactor h_client_architecture_support() CPU parsing code

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Committed by Peter Maydell, 2016-06-14 09:30:05 +01:00
commit a28aae041a
11 changed files with 166 additions and 78 deletions

@ -66,8 +66,7 @@ static void pmac_dma_read(BlockBackend *blk,
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
dma_addr_t dma_addr, dma_len;
void *mem;
dma_addr_t dma_addr;
int64_t sector_num;
int nsector;
uint64_t align = BDRV_SECTOR_SIZE;
@ -84,9 +83,10 @@ static void pmac_dma_read(BlockBackend *blk,
sector_num, nsector);
dma_addr = io->addr;
dma_len = io->len;
mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
DMA_DIRECTION_FROM_DEVICE);
io->dir = DMA_DIRECTION_FROM_DEVICE;
io->dma_len = io->len;
io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
io->dir);
if (offset & (align - 1)) {
head_bytes = offset & (align - 1);
@ -100,7 +100,7 @@ static void pmac_dma_read(BlockBackend *blk,
offset = offset & ~(align - 1);
}
qemu_iovec_add(&io->iov, mem, io->len);
qemu_iovec_add(&io->iov, io->dma_mem, io->len);
if ((offset + bytes) & (align - 1)) {
tail_bytes = (offset + bytes) & (align - 1);
@ -130,8 +130,7 @@ static void pmac_dma_write(BlockBackend *blk,
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
dma_addr_t dma_addr, dma_len;
void *mem;
dma_addr_t dma_addr;
int64_t sector_num;
int nsector;
uint64_t align = BDRV_SECTOR_SIZE;
@ -149,9 +148,10 @@ static void pmac_dma_write(BlockBackend *blk,
sector_num, nsector);
dma_addr = io->addr;
dma_len = io->len;
mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
DMA_DIRECTION_TO_DEVICE);
io->dir = DMA_DIRECTION_TO_DEVICE;
io->dma_len = io->len;
io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
io->dir);
if (offset & (align - 1)) {
head_bytes = offset & (align - 1);
@ -163,7 +163,7 @@ static void pmac_dma_write(BlockBackend *blk,
blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
qemu_iovec_add(&io->iov, mem, io->len);
qemu_iovec_add(&io->iov, io->dma_mem, io->len);
bytes += offset & (align - 1);
offset = offset & ~(align - 1);
@ -181,7 +181,7 @@ static void pmac_dma_write(BlockBackend *blk,
blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
if (!unaligned_head) {
qemu_iovec_add(&io->iov, mem, io->len);
qemu_iovec_add(&io->iov, io->dma_mem, io->len);
}
qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
@ -193,7 +193,7 @@ static void pmac_dma_write(BlockBackend *blk,
}
if (!unaligned_head && !unaligned_tail) {
qemu_iovec_add(&io->iov, mem, io->len);
qemu_iovec_add(&io->iov, io->dma_mem, io->len);
}
s->io_buffer_size -= io->len;
@ -214,18 +214,18 @@ static void pmac_dma_trim(BlockBackend *blk,
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
dma_addr_t dma_addr, dma_len;
void *mem;
dma_addr_t dma_addr;
qemu_iovec_destroy(&io->iov);
qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
dma_addr = io->addr;
dma_len = io->len;
mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
DMA_DIRECTION_TO_DEVICE);
io->dir = DMA_DIRECTION_TO_DEVICE;
io->dma_len = io->len;
io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
io->dir);
qemu_iovec_add(&io->iov, mem, io->len);
qemu_iovec_add(&io->iov, io->dma_mem, io->len);
s->io_buffer_size -= io->len;
s->io_buffer_index += io->len;
io->len = 0;
@ -285,6 +285,9 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
return;
done:
dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
io->dir, io->dma_len);
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
} else {
@ -351,6 +354,9 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
return;
done:
dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
io->dir, io->dma_len);
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
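
Not shown in the patch itself, but for orientation: the change above records the dma_memory_map() result in the DBDMA_io request (dma_mem, dma_len, dir) so the completion callbacks can release the mapping with dma_memory_unmap(). A minimal sketch of that map-at-setup / unmap-at-completion pairing follows; the function names are illustrative, and only the DBDMA_io fields and DMA helpers are taken from the diff:

    /* Illustrative only -- not the actual macio code. */
    #include "qemu/osdep.h"
    #include "exec/address-spaces.h"
    #include "sysemu/dma.h"
    #include "hw/ppc/mac_dbdma.h"

    static void example_dma_start(DBDMA_io *io)
    {
        /* Map the guest buffer and remember the mapping in the request. */
        io->dir = DMA_DIRECTION_FROM_DEVICE;
        io->dma_len = io->len;
        io->dma_mem = dma_memory_map(&address_space_memory, io->addr,
                                     &io->dma_len, io->dir);
        /* ... build io->iov from io->dma_mem and submit the transfer ... */
    }

    static void example_dma_complete(DBDMA_io *io)
    {
        /* Release the mapping once the transfer has finished. */
        dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
                         io->dir, io->dma_len);
    }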

@ -762,14 +762,17 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
int ret, i, offset;
uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
uint32_t nr_lmbs = (spapr->hotplug_memory.base +
memory_region_size(&spapr->hotplug_memory.mr)) /
lmb_size;
uint32_t *int_buf, *cur_index, buf_len;
int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
/*
* Don't create the node if there are no DR LMBs.
* Don't create the node if there is no hotpluggable memory
*/
if (!nr_lmbs) {
if (machine->ram_size == machine->maxram_size) {
return 0;
}
@ -803,26 +806,40 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
int_buf[0] = cpu_to_be32(nr_lmbs);
cur_index++;
for (i = 0; i < nr_lmbs; i++) {
sPAPRDRConnector *drc;
sPAPRDRConnectorClass *drck;
uint64_t addr = i * lmb_size + spapr->hotplug_memory.base;;
uint64_t addr = i * lmb_size;
uint32_t *dynamic_memory = cur_index;
drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
addr/lmb_size);
g_assert(drc);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
if (i >= hotplug_lmb_start) {
sPAPRDRConnector *drc;
sPAPRDRConnectorClass *drck;
dynamic_memory[0] = cpu_to_be32(addr >> 32);
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
dynamic_memory[2] = cpu_to_be32(drck->get_index(drc));
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
if (addr < machine->ram_size ||
memory_region_present(get_system_memory(), addr)) {
dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB, i);
g_assert(drc);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
dynamic_memory[0] = cpu_to_be32(addr >> 32);
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
dynamic_memory[2] = cpu_to_be32(drck->get_index(drc));
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
if (memory_region_present(get_system_memory(), addr)) {
dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
} else {
dynamic_memory[5] = cpu_to_be32(0);
}
} else {
dynamic_memory[5] = cpu_to_be32(0);
/*
* LMB information for RMA, boot time RAM and gap b/n RAM and
* hotplug memory region -- all these are marked as reserved
* and as having no valid DRC.
*/
dynamic_memory[0] = cpu_to_be32(addr >> 32);
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
dynamic_memory[2] = cpu_to_be32(0);
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
dynamic_memory[4] = cpu_to_be32(-1);
dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
SPAPR_LMB_FLAGS_DRC_INVALID);
}
cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
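
For reference (not part of the diff): each ibm,dynamic-memory entry written by the loop above is SPAPR_DR_LMB_LIST_ENTRY_SIZE (6) big-endian 32-bit cells, and after this change every LMB from address 0 up to the end of the hotplug region gets one, with pre-hotplug memory marked reserved. A conceptual sketch of one entry; the struct is illustrative only, the real code fills a flat uint32_t buffer:

    #include <stdint.h>

    /* One ibm,dynamic-memory entry, in the order the loop writes it. */
    struct lmb_entry {
        uint32_t addr_hi;    /* LMB base address, high 32 bits */
        uint32_t addr_lo;    /* LMB base address, low 32 bits */
        uint32_t drc_index;  /* DRC index, 0 for reserved LMBs */
        uint32_t reserved;   /* always 0 */
        uint32_t numa_node;  /* NUMA node, -1 for reserved LMBs */
        uint32_t flags;      /* SPAPR_LMB_FLAGS_ASSIGNED, or
                              * SPAPR_LMB_FLAGS_RESERVED |
                              * SPAPR_LMB_FLAGS_DRC_INVALID */
    };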

@ -922,6 +922,41 @@ static void do_set_compat(void *arg)
((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)
static void cas_handle_compat_cpu(PowerPCCPUClass *pcc, uint32_t pvr,
unsigned max_lvl, unsigned *compat_lvl,
unsigned *cpu_version)
{
unsigned lvl = get_compat_level(pvr);
bool is205, is206, is207;
if (!lvl) {
return;
}
/* If it is a logical PVR, try to determine the highest level */
is205 = (pcc->pcr_supported & PCR_COMPAT_2_05) &&
(lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
is206 = (pcc->pcr_supported & PCR_COMPAT_2_06) &&
((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
(lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));
is207 = (pcc->pcr_supported & PCR_COMPAT_2_07) &&
(lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_07));
if (is205 || is206 || is207) {
if (!max_lvl) {
/* User did not set the level, choose the highest */
if (*compat_lvl <= lvl) {
*compat_lvl = lvl;
*cpu_version = pvr;
}
} else if (max_lvl >= lvl) {
/* User chose the level, don't set higher than this */
*compat_lvl = lvl;
*cpu_version = pvr;
}
}
}
#define OV5_DRCONF_MEMORY 0x20
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
@ -931,7 +966,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
{
target_ulong list = ppc64_phys_to_real(args[0]);
target_ulong ov_table, ov5;
PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu_);
CPUState *cs;
bool cpu_match = false, cpu_update = true, memory_update = false;
unsigned old_cpu_version = cpu_->cpu_version;
@ -958,29 +993,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
cpu_match = true;
cpu_version = cpu_->cpu_version;
} else if (!cpu_match) {
/* If it is a logical PVR, try to determine the highest level */
unsigned lvl = get_compat_level(pvr);
if (lvl) {
bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
(lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
(lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));
if (is205 || is206) {
if (!max_lvl) {
/* User did not set the level, choose the highest */
if (compat_lvl <= lvl) {
compat_lvl = lvl;
cpu_version = pvr;
}
} else if (max_lvl >= lvl) {
/* User chose the level, don't set higher than this */
compat_lvl = lvl;
cpu_version = pvr;
}
}
}
cas_handle_compat_cpu(pcc, pvr, max_lvl, &compat_lvl, &cpu_version);
}
/* Terminator record */
if (~pvr_mask & pvr) {
@ -990,7 +1003,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
/* Parsing finished */
trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
cpu_version, pcc_->pcr_mask);
cpu_version, pcc->pcr_mask);
/* Update CPUs */
if (old_cpu_version != cpu_version) {

@ -477,6 +477,19 @@ typedef struct {
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
/* Bits present in AT_HWCAP2 for PowerPC. */
#define PPC_FEATURE2_ARCH_2_07 0x80000000
#define PPC_FEATURE2_HAS_HTM 0x40000000
#define PPC_FEATURE2_HAS_DSCR 0x20000000
#define PPC_FEATURE2_HAS_EBB 0x10000000
#define PPC_FEATURE2_HAS_ISEL 0x08000000
#define PPC_FEATURE2_HAS_TAR 0x04000000
#define PPC_FEATURE2_HAS_VEC_CRYPTO 0x02000000
#define PPC_FEATURE2_HTM_NOSC 0x01000000
#define PPC_FEATURE2_ARCH_3_00 0x00800000
#define PPC_FEATURE2_HAS_IEEE128 0x00400000
/* Bits present in AT_HWCAP for Sparc. */
#define HWCAP_SPARC_FLUSH 0x00000001
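
These AT_HWCAP2 values mirror the bits the Linux kernel advertises to user space; the new PPC_FEATURE2_ARCH_2_07 bit is how a guest tells its programs that ISA 2.07 features are available. As a point of reference, not part of the patch, a program inside a ppc64 Linux guest can query the bit with the standard getauxval() interface:

    /* Guest-side check for PowerISA 2.07 support via AT_HWCAP2. */
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef PPC_FEATURE2_ARCH_2_07
    #define PPC_FEATURE2_ARCH_2_07 0x80000000
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("AT_HWCAP2 = 0x%lx, ISA 2.07: %s\n", hwcap2,
               (hwcap2 & PPC_FEATURE2_ARCH_2_07) ? "yes" : "no");
        return 0;
    }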

@ -24,6 +24,7 @@
#include "exec/memory.h"
#include "qemu/iov.h"
#include "sysemu/dma.h"
typedef struct DBDMA_io DBDMA_io;
@ -44,6 +45,10 @@ struct DBDMA_io {
uint8_t head_remainder[0x200];
uint8_t tail_remainder[0x200];
QEMUIOVector iov;
/* DMA request */
void *dma_mem;
dma_addr_t dma_len;
DMADirection dir;
};
/*

@ -620,9 +620,11 @@ int spapr_rng_populate_dt(void *fdt);
#define SPAPR_DR_LMB_LIST_ENTRY_SIZE 6
/*
* This flag value defines the LMB as assigned in ibm,dynamic-memory
* property under ibm,dynamic-reconfiguration-memory node.
* Defines for flag value in ibm,dynamic-memory property under
* ibm,dynamic-reconfiguration-memory node.
*/
#define SPAPR_LMB_FLAGS_ASSIGNED 0x00000008
#define SPAPR_LMB_FLAGS_DRC_INVALID 0x00000020
#define SPAPR_LMB_FLAGS_RESERVED 0x00000080
#endif /* !defined (__HW_SPAPR_H__) */

@ -165,7 +165,8 @@ typedef struct PowerPCCPUClass {
uint32_t pvr;
bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr);
uint64_t pcr_mask;
uint64_t pcr_mask; /* Available bits in PCR register */
uint64_t pcr_supported; /* Bits for supported PowerISA versions */
uint32_t svr;
uint64_t insns_flags;
uint64_t insns_flags2;

@ -1187,7 +1187,9 @@ void ppc_store_msr (CPUPPCState *env, target_ulong value);
void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf);
int ppc_get_compat_smt_threads(PowerPCCPU *cpu);
#if defined(TARGET_PPC64)
void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp);
#endif
/* Time-base and decrementer management */
#ifndef NO_CPU_IO_DEFS
@ -2200,6 +2202,7 @@ enum {
enum {
PCR_COMPAT_2_05 = 1ull << (63-62),
PCR_COMPAT_2_06 = 1ull << (63-61),
PCR_COMPAT_2_07 = 1ull << (63-60),
PCR_VEC_DIS = 1ull << (63-0), /* Vec. disable (bit NA since POWER8) */
PCR_VSX_DIS = 1ull << (63-1), /* VSX disable (bit NA since POWER8) */
PCR_TM_DIS = 1ull << (63-2), /* Trans. memory disable (POWER8) */
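
A side note on the "(63 - n)" idiom in these PCR_* definitions (my gloss, not from the patch): POWER documentation numbers register bits from the most-significant end, so architected bit n of the 64-bit PCR is 1ull << (63 - n). A tiny self-check, purely illustrative:

    #include <assert.h>

    int main(void)
    {
        assert((1ull << (63 - 62)) == 0x2);  /* PCR_COMPAT_2_05 */
        assert((1ull << (63 - 61)) == 0x4);  /* PCR_COMPAT_2_06 */
        assert((1ull << (63 - 60)) == 0x8);  /* PCR_COMPAT_2_07, new here */
        return 0;
    }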

@ -2329,6 +2329,19 @@ static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
return POWERPC_CPU_CLASS(oc);
}
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
uint32_t host_pvr = mfpvr();
PowerPCCPUClass *pvr_pcc;
pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
if (pvr_pcc == NULL) {
pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
}
return pvr_pcc;
}
static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
@ -2336,14 +2349,10 @@ static int kvm_ppc_register_host_cpu_type(void)
.instance_init = kvmppc_host_cpu_initfn,
.class_init = kvmppc_host_cpu_class_init,
};
uint32_t host_pvr = mfpvr();
PowerPCCPUClass *pvr_pcc;
DeviceClass *dc;
pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
if (pvr_pcc == NULL) {
pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
}
pvr_pcc = kvm_ppc_get_host_cpu_class();
if (pvr_pcc == NULL) {
return -1;
}

@ -56,6 +56,7 @@ void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
bool kvmppc_has_cap_fixup_hcalls(void);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
#else
@ -252,6 +253,12 @@ static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
abort();
}
static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
return NULL;
}
#endif
#ifndef CONFIG_KVM

@ -8365,7 +8365,8 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
dc->desc = "POWER7";
dc->props = powerpc_servercpu_properties;
pcc->pvr_match = ppc_pvr_match_power7;
pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06;
pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->init_proc = init_proc_POWER7;
pcc->check_pow = check_pow_nocheck;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
@ -8445,7 +8446,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
dc->desc = "POWER8";
dc->props = powerpc_servercpu_properties;
pcc->pvr_match = ppc_pvr_match_power8;
pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06;
pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->init_proc = init_proc_POWER8;
pcc->check_pow = check_pow_nocheck;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
@ -9513,28 +9515,37 @@ int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
return ret;
}
#ifdef TARGET_PPC64
void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp)
{
int ret = 0;
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *host_pcc;
cpu->cpu_version = cpu_version;
switch (cpu_version) {
case CPU_POWERPC_LOGICAL_2_05:
env->spr[SPR_PCR] = PCR_COMPAT_2_05;
env->spr[SPR_PCR] = PCR_TM_DIS | PCR_VSX_DIS | PCR_COMPAT_2_07 |
PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
break;
case CPU_POWERPC_LOGICAL_2_06:
env->spr[SPR_PCR] = PCR_COMPAT_2_06;
break;
case CPU_POWERPC_LOGICAL_2_06_PLUS:
env->spr[SPR_PCR] = PCR_COMPAT_2_06;
env->spr[SPR_PCR] = PCR_TM_DIS | PCR_COMPAT_2_07 | PCR_COMPAT_2_06;
break;
case CPU_POWERPC_LOGICAL_2_07:
env->spr[SPR_PCR] = PCR_COMPAT_2_07;
break;
default:
env->spr[SPR_PCR] = 0;
break;
}
host_pcc = kvm_ppc_get_host_cpu_class();
if (host_pcc) {
env->spr[SPR_PCR] &= host_pcc->pcr_mask;
}
if (kvm_enabled()) {
ret = kvmppc_set_compat(cpu, cpu->cpu_version);
if (ret < 0) {
@ -9543,6 +9554,7 @@ void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp)
}
}
}
#endif
static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b)
{
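
To close the loop (not part of the diff): with this series, a compatibility mode is requested by handing ppc_set_compat() a logical PVR, and the function now masks the resulting PCR value down to the bits the host CPU class reports via kvm_ppc_get_host_cpu_class(). A hedged sketch of a caller, reusing only names that appear in the diff above; the surrounding error handling is illustrative:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "cpu.h"
    #include "cpu-models.h"   /* assumed location of CPU_POWERPC_LOGICAL_2_07 */

    /* Put a vCPU into PowerISA 2.07 (POWER8-era) compatibility mode,
     * treating failure as fatal. */
    static void example_enter_compat_207(PowerPCCPU *cpu)
    {
        Error *local_err = NULL;

        ppc_set_compat(cpu, CPU_POWERPC_LOGICAL_2_07, &local_err);
        if (local_err) {
            error_report_err(local_err);
            exit(1);
        }
    }

In guest-facing terms this is what lets a pseries guest be started with a generic POWER8-era CPU model, for example via a "compat" CPU option such as compat=power8 (the exact command-line spelling depends on the QEMU version).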