Merge remote-tracking branch 'remotes/lalrae/tags/mips-20160226' into staging

MIPS patches 2016-02-26

Changes:
* support for FPU and MSA in KVM guest
* support for R6 Virtual Processors

# gpg: Signature made Fri 26 Feb 2016 11:07:37 GMT using RSA key ID 0B29DA6B
# gpg: Good signature from "Leon Alrae <leon.alrae@imgtec.com>"

* remotes/lalrae/tags/mips-20160226:
  target-mips: implement R6 multi-threading
  mips/kvm: Support MSA in MIPS KVM guests
  mips/kvm: Support FPU in MIPS KVM guests
  mips/kvm: Support signed 64-bit KVM registers
  mips/kvm: Support unsigned KVM registers
  mips/kvm: Implement Config CP0 registers
  mips/kvm: Implement PRid CP0 register
  mips/kvm: Remove a couple of noisy DPRINTFs

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 4d1e324b22
Peter Maydell <peter.maydell@linaro.org>, 2016-02-26 12:54:22 +00:00
8 changed files with 525 additions and 14 deletions
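
The FPU and MSA support in this series is gated on two KVM capabilities, KVM_CAP_MIPS_FPU and KVM_CAP_MIPS_MSA, which kvm_arch_init() probes below. A minimal host-side probe might look like the following sketch, which assumes only the standard /dev/kvm ioctl interface and the capability constants from <linux/kvm.h>; the probe itself is illustrative and not drawn from the series:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: ask the host kernel whether MIPS FPU/MSA state can be exposed
 * to KVM guests.  KVM_CHECK_EXTENSION returns 0 when a capability is
 * absent and a non-zero value when it is available. */
int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    printf("KVM_CAP_MIPS_FPU: %d\n",
           ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU));
    printf("KVM_CAP_MIPS_MSA: %d\n",
           ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_MSA));
    return 0;
}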

View File

@ -1405,6 +1405,10 @@ const struct mips_opcode mips_builtin_opcodes[] =
{"cmp.sor.d", "D,S,T", 0x46a00019, 0xffe0003f, RD_S|RD_T|WR_D|FP_D, 0, I32R6},
{"cmp.sune.d", "D,S,T", 0x46a0001a, 0xffe0003f, RD_S|RD_T|WR_D|FP_D, 0, I32R6},
{"cmp.sne.d", "D,S,T", 0x46a0001b, 0xffe0003f, RD_S|RD_T|WR_D|FP_D, 0, I32R6},
{"dvp", "", 0x41600024, 0xffffffff, TRAP, 0, I32R6},
{"dvp", "t", 0x41600024, 0xffe0ffff, TRAP|WR_t, 0, I32R6},
{"evp", "", 0x41600004, 0xffffffff, TRAP, 0, I32R6},
{"evp", "t", 0x41600004, 0xffe0ffff, TRAP|WR_t, 0, I32R6},
/* MSA */
{"sll.b", "+d,+e,+f", 0x7800000d, 0xffe0003f, WR_VD|RD_VS|RD_VT, 0, MSA},

View File

@ -77,6 +77,15 @@ static bool mips_cpu_has_work(CPUState *cs)
has_work = false;
}
}
/* MIPS Release 6 has the ability to halt the CPU. */
if (env->CP0_Config5 & (1 << CP0C5_VP)) {
if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
has_work = true;
}
if (!mips_vp_active(env)) {
has_work = false;
}
}
return has_work;
}

View File

@ -237,6 +237,8 @@ struct CPUMIPSState {
int32_t CP0_Index;
/* CP0_MVP* are per MVP registers. */
int32_t CP0_VPControl;
#define CP0VPCtl_DIS 0
int32_t CP0_Random;
int32_t CP0_VPEControl;
#define CP0VPECo_YSI 21
@ -286,6 +288,8 @@ struct CPUMIPSState {
# define CP0EnLo_RI 31
# define CP0EnLo_XI 30
#endif
int32_t CP0_GlobalNumber;
#define CP0GN_VPId 0
target_ulong CP0_Context;
target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
int32_t CP0_PageMask;
@ -471,6 +475,7 @@ struct CPUMIPSState {
#define CP0C5_XNP 13
#define CP0C5_UFE 9
#define CP0C5_FRE 8
#define CP0C5_VP 7
#define CP0C5_SBRI 6
#define CP0C5_MVH 5
#define CP0C5_LLB 4
@ -858,6 +863,26 @@ static inline int mips_vpe_active(CPUMIPSState *env)
return active;
}
static inline int mips_vp_active(CPUMIPSState *env)
{
CPUState *other_cs = first_cpu;
/* Check if the VP disabled other VPs (which means the VP is enabled) */
if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
return 1;
}
/* Check if the virtual processor is disabled due to a DVP */
CPU_FOREACH(other_cs) {
MIPSCPU *other_cpu = MIPS_CPU(other_cs);
if ((&other_cpu->env != env) &&
((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
return 0;
}
}
return 1;
}
#include "exec/exec-all.h"
static inline void compute_hflags(CPUMIPSState *env)

View File

@ -176,6 +176,10 @@ DEF_HELPER_0(dmt, tl)
DEF_HELPER_0(emt, tl)
DEF_HELPER_1(dvpe, tl, env)
DEF_HELPER_1(evpe, tl, env)
/* R6 Multi-threading */
DEF_HELPER_1(dvp, tl, env)
DEF_HELPER_1(evp, tl, env)
#endif /* !CONFIG_USER_ONLY */
/* microMIPS functions */

View File

@ -30,6 +30,9 @@
#define DPRINTF(fmt, ...) \
do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
@ -46,16 +49,39 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
/* MIPS has 128 signals */
kvm_set_sigmask_len(s, 16);
kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
DPRINTF("%s\n", __func__);
return 0;
}
int kvm_arch_init_vcpu(CPUState *cs)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
int ret = 0;
qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
if (ret < 0) {
/* mark unsupported so it gets disabled on reset */
kvm_mips_fpu_cap = 0;
ret = 0;
}
}
if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
if (ret < 0) {
/* mark unsupported so it gets disabled on reset */
kvm_mips_msa_cap = 0;
ret = 0;
}
}
DPRINTF("%s\n", __func__);
return ret;
}
@ -64,10 +90,14 @@ void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
CPUMIPSState *env = &cpu->env;
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
fprintf(stderr, "Warning: FPU not supported with KVM, disabling\n");
if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
fprintf(stderr, "Warning: KVM does not support FPU, disabling\n");
env->CP0_Config1 &= ~(1 << CP0C1_FP);
}
if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
fprintf(stderr, "Warning: KVM does not support MSA, disabling\n");
env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
}
DPRINTF("%s\n", __func__);
}
@ -88,7 +118,6 @@ static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
CPUMIPSState *env = &cpu->env;
DPRINTF("%s: %#x\n", __func__, env->CP0_Cause & (1 << (2 + CP0Ca_IP)));
return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}
@ -117,7 +146,6 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
DPRINTF("%s\n", __func__);
return MEMTXATTRS_UNSPECIFIED;
}
@ -230,6 +258,13 @@ int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
@ -243,6 +278,17 @@ static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
uint32_t *addr)
{
struct kvm_one_reg cp0reg = {
.id = reg_id,
.addr = (uintptr_t)addr
};
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
target_ulong *addr)
{
@ -256,7 +302,18 @@ static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
}
static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
uint64_t *addr)
int64_t *addr)
{
struct kvm_one_reg cp0reg = {
.id = reg_id,
.addr = (uintptr_t)addr
};
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}
static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
uint64_t *addr)
{
struct kvm_one_reg cp0reg = {
.id = reg_id,
@ -277,6 +334,17 @@ static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
uint32_t *addr)
{
struct kvm_one_reg cp0reg = {
.id = reg_id,
.addr = (uintptr_t)addr
};
return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
target_ulong *addr)
{
@ -295,7 +363,7 @@ static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
}
static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
uint64_t *addr)
int64_t *addr)
{
struct kvm_one_reg cp0reg = {
.id = reg_id,
@ -305,6 +373,50 @@ static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
uint64_t *addr)
{
struct kvm_one_reg cp0reg = {
.id = reg_id,
.addr = (uintptr_t)addr
};
return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
#define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \
(1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \
(1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \
(1U << CP0C5_UFE) | \
(1U << CP0C5_FRE) | \
(1U << CP0C5_UFR))
static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
int32_t *addr, int32_t mask)
{
int err;
int32_t tmp, change;
err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
if (err < 0) {
return err;
}
/* only change bits in mask */
change = (*addr ^ tmp) & mask;
if (!change) {
return 0;
}
tmp = tmp ^ change;
return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}
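
kvm_mips_change_one_reg() above performs a read-modify-write on a guest CP0 register in which only the bits named in the mask may follow QEMU's value; every other bit keeps whatever the kernel reported. A stand-alone sketch of that rule, using illustrative names that are not part of the patch:

#include <stdint.h>

/* Start from the kernel's view of the register and flip only those bits
 * that both differ from QEMU's view and are set in the mask. */
static int32_t apply_masked_update(int32_t kvm_val, int32_t qemu_val,
                                   int32_t mask)
{
    int32_t change = (qemu_val ^ kvm_val) & mask;
    return kvm_val ^ change;
}
/* Example: kvm_val = 0xA (1010b), qemu_val = 0x5 (0101b), mask = 0x3 (0011b)
 * gives 0x9 (1001b): the two masked low bits follow QEMU, the unmasked
 * high bits stay as KVM reported. */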
/*
* We freeze the KVM timer when either the VM clock is stopped or the state is
* saved (the state is dirty).
@ -322,13 +434,13 @@ static int kvm_mips_save_count(CPUState *cs)
int err, ret = 0;
/* freeze KVM timer */
err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
if (err < 0) {
DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
ret = err;
} else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
if (err < 0) {
DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
ret = err;
@ -364,14 +476,14 @@ static int kvm_mips_restore_count(CPUState *cs)
int err_dc, err, ret = 0;
/* check the timer is frozen */
err_dc = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
if (err_dc < 0) {
DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
ret = err_dc;
} else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
/* freeze timer (sets COUNT_RESUME for us) */
count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
if (err < 0) {
DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
ret = err;
@ -395,7 +507,7 @@ static int kvm_mips_restore_count(CPUState *cs)
/* resume KVM timer */
if (err_dc >= 0) {
count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
if (err < 0) {
DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
ret = err;
@ -428,8 +540,8 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
} else {
/* Set clock restore time to now */
count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
ret = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_RESUME,
&count_resume);
ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
&count_resume);
if (ret < 0) {
fprintf(stderr, "Failed setting COUNT_RESUME\n");
return;
@ -444,6 +556,167 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
}
}
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
int err, ret = 0;
unsigned int i;
/* Only put FPU state if we're emulating a CPU with an FPU */
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
/* FPU Control Registers */
if (level == KVM_PUT_FULL_STATE) {
err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
&env->active_fpu.fcr0);
if (err < 0) {
DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
ret = err;
}
}
err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
&env->active_fpu.fcr31);
if (err < 0) {
DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
ret = err;
}
/*
* FPU register state is a subset of MSA vector state, so don't put FPU
* registers if we're emulating a CPU with MSA.
*/
if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
/* Floating point registers */
for (i = 0; i < 32; ++i) {
if (env->CP0_Status & (1 << CP0St_FR)) {
err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
&env->active_fpu.fpr[i].d);
} else {
err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
&env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
}
if (err < 0) {
DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
ret = err;
}
}
}
}
/* Only put MSA state if we're emulating a CPU with MSA */
if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
/* MSA Control Registers */
if (level == KVM_PUT_FULL_STATE) {
err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
&env->msair);
if (err < 0) {
DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
ret = err;
}
}
err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
&env->active_tc.msacsr);
if (err < 0) {
DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
ret = err;
}
/* Vector registers (includes FP registers) */
for (i = 0; i < 32; ++i) {
/* Big endian MSA not supported by QEMU yet anyway */
err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
env->active_fpu.fpr[i].wr.d);
if (err < 0) {
DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
ret = err;
}
}
}
return ret;
}
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
int err, ret = 0;
unsigned int i;
/* Only get FPU state if we're emulating a CPU with an FPU */
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
/* FPU Control Registers */
err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
&env->active_fpu.fcr0);
if (err < 0) {
DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
&env->active_fpu.fcr31);
if (err < 0) {
DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
ret = err;
} else {
restore_fp_status(env);
}
/*
* FPU register state is a subset of MSA vector state, so don't save FPU
* registers if we're emulating a CPU with MSA.
*/
if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
/* Floating point registers */
for (i = 0; i < 32; ++i) {
if (env->CP0_Status & (1 << CP0St_FR)) {
err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
&env->active_fpu.fpr[i].d);
} else {
err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
&env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
}
if (err < 0) {
DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
ret = err;
}
}
}
}
/* Only get MSA state if we're emulating a CPU with MSA */
if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
/* MSA Control Registers */
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
&env->msair);
if (err < 0) {
DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
&env->active_tc.msacsr);
if (err < 0) {
DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
ret = err;
} else {
restore_msa_fp_status(env);
}
/* Vector registers (includes FP registers) */
for (i = 0; i < 32; ++i) {
/* Big endian MSA not supported by QEMU yet anyway */
err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
env->active_fpu.fpr[i].wr.d);
if (err < 0) {
DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
ret = err;
}
}
}
return ret;
}
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
MIPSCPU *cpu = MIPS_CPU(cs);
@ -522,6 +795,53 @@ static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
if (err < 0) {
DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
&env->CP0_Config0,
KVM_REG_MIPS_CP0_CONFIG_MASK);
if (err < 0) {
DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
&env->CP0_Config1,
KVM_REG_MIPS_CP0_CONFIG1_MASK);
if (err < 0) {
DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
&env->CP0_Config2,
KVM_REG_MIPS_CP0_CONFIG2_MASK);
if (err < 0) {
DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
&env->CP0_Config3,
KVM_REG_MIPS_CP0_CONFIG3_MASK);
if (err < 0) {
DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
&env->CP0_Config4,
KVM_REG_MIPS_CP0_CONFIG4_MASK);
if (err < 0) {
DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
&env->CP0_Config5,
KVM_REG_MIPS_CP0_CONFIG5_MASK);
if (err < 0) {
DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
&env->CP0_ErrorEPC);
if (err < 0) {
@ -608,6 +928,41 @@ static int kvm_mips_get_cp0_registers(CPUState *cs)
DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
if (err < 0) {
DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
ret = err;
}
err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
&env->CP0_ErrorEPC);
if (err < 0) {
@ -646,6 +1001,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
ret = kvm_mips_put_fpu_registers(cs, level);
if (ret < 0) {
return ret;
}
return ret;
}
@ -673,6 +1033,7 @@ int kvm_arch_get_registers(CPUState *cs)
env->active_tc.PC = regs.pc;
kvm_mips_get_cp0_registers(cs);
kvm_mips_get_fpu_registers(cs);
return ret;
}

View File

@ -571,6 +571,14 @@ static bool mips_vpe_is_wfi(MIPSCPU *c)
return cpu->halted && mips_vpe_active(env);
}
static bool mips_vp_is_wfi(MIPSCPU *c)
{
CPUState *cpu = CPU(c);
CPUMIPSState *env = &c->env;
return cpu->halted && mips_vp_active(env);
}
static inline void mips_vpe_wake(MIPSCPU *c)
{
/* Don't set ->halted = 0 directly, let it be done via cpu_has_work
@ -1840,6 +1848,46 @@ target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
return env->CP0_YQMask;
}
/* R6 Multi-threading */
#ifndef CONFIG_USER_ONLY
target_ulong helper_dvp(CPUMIPSState *env)
{
CPUState *other_cs = first_cpu;
target_ulong prev = env->CP0_VPControl;
if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
CPU_FOREACH(other_cs) {
MIPSCPU *other_cpu = MIPS_CPU(other_cs);
/* Turn off all VPs except the one executing the dvp. */
if (&other_cpu->env != env) {
mips_vpe_sleep(other_cpu);
}
}
env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
}
return prev;
}
target_ulong helper_evp(CPUMIPSState *env)
{
CPUState *other_cs = first_cpu;
target_ulong prev = env->CP0_VPControl;
if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
CPU_FOREACH(other_cs) {
MIPSCPU *other_cpu = MIPS_CPU(other_cs);
if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
/* If the VP is WFI, don't disturb its sleep.
* Otherwise, wake it up. */
mips_vpe_wake(other_cpu);
}
}
env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
}
return prev;
}
#endif /* !CONFIG_USER_ONLY */
#ifndef CONFIG_USER_ONLY
/* TLB management */
static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)

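The DVP/EVP helpers above implement the Release 6 "disable/enable virtual processors" pair: dvp puts every virtual processor except the caller to sleep and sets VPControl.DIS, returning the previous VPControl value, while evp clears the bit and wakes the others (unless they are sleeping in a wait of their own). From the guest side the pairing might be used roughly as in this sketch (GCC-style inline assembly, assuming a MIPS32R6 toolchain; the guest_dvp/guest_evp names are illustrative):

/* Stop all other virtual processors around a critical sequence. */
static inline unsigned long guest_dvp(void)
{
    unsigned long prev;
    __asm__ __volatile__(
        ".set push\n\t"
        ".set mips32r6\n\t"
        "dvp %0\n\t"          /* previous VPControl lands in %0 */
        ".set pop"
        : "=r"(prev) : : "memory");
    return prev;
}

/* Re-enable the other virtual processors. */
static inline void guest_evp(void)
{
    __asm__ __volatile__(
        ".set push\n\t"
        ".set mips32r6\n\t"
        "evp\n\t"
        ".set pop"
        : : : "memory");
}
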
View File

@ -894,6 +894,8 @@ enum {
OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0,
OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0,
OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0,
OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0,
OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0,
};
/* Coprocessor 0 (with rs == C0) */
@ -1429,6 +1431,7 @@ typedef struct DisasContext {
bool mvh;
int CP0_LLAddr_shift;
bool ps;
bool vp;
} DisasContext;
enum {
@ -4950,6 +4953,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_helper_mfc0_mvpconf1(arg, cpu_env);
rn = "MVPConf1";
break;
case 4:
CP0_CHECK(ctx->vp);
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
rn = "VPControl";
break;
default:
goto cp0_unimplemented;
}
@ -5077,6 +5085,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
}
rn = "EntryLo1";
break;
case 1:
CP0_CHECK(ctx->vp);
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
rn = "GlobalNumber";
break;
default:
goto cp0_unimplemented;
}
@ -5597,6 +5610,11 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
/* ignored */
rn = "MVPConf1";
break;
case 4:
CP0_CHECK(ctx->vp);
/* ignored */
rn = "VPControl";
break;
default:
goto cp0_unimplemented;
}
@ -5699,6 +5717,11 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_helper_mtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
case 1:
CP0_CHECK(ctx->vp);
/* ignored */
rn = "GlobalNumber";
break;
default:
goto cp0_unimplemented;
}
@ -6234,6 +6257,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_helper_mfc0_mvpconf1(arg, cpu_env);
rn = "MVPConf1";
break;
case 4:
CP0_CHECK(ctx->vp);
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
rn = "VPControl";
break;
default:
goto cp0_unimplemented;
}
@ -6335,6 +6363,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
rn = "EntryLo1";
break;
case 1:
CP0_CHECK(ctx->vp);
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
rn = "GlobalNumber";
break;
default:
goto cp0_unimplemented;
}
@ -6841,6 +6874,11 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
/* ignored */
rn = "MVPConf1";
break;
case 4:
CP0_CHECK(ctx->vp);
/* ignored */
rn = "VPControl";
break;
default:
goto cp0_unimplemented;
}
@ -6941,6 +6979,11 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
gen_helper_dmtc0_entrylo1(cpu_env, arg);
rn = "EntryLo1";
break;
case 1:
CP0_CHECK(ctx->vp);
/* ignored */
rn = "GlobalNumber";
break;
default:
goto cp0_unimplemented;
}
@ -19080,6 +19123,20 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
gen_helper_evpe(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case OPC_DVP:
check_insn(ctx, ISA_MIPS32R6);
if (ctx->vp) {
gen_helper_dvp(t0, cpu_env);
gen_store_gpr(t0, rt);
}
break;
case OPC_EVP:
check_insn(ctx, ISA_MIPS32R6);
if (ctx->vp) {
gen_helper_evp(t0, cpu_env);
gen_store_gpr(t0, rt);
}
break;
case OPC_DI:
check_insn(ctx, ISA_MIPS32R2);
save_cpu_state(ctx, 1);
@ -19611,6 +19668,7 @@ void gen_intermediate_code(CPUMIPSState *env, struct TranslationBlock *tb)
ctx.ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
ctx.ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
restore_cpu_state(env, &ctx);
#ifdef CONFIG_USER_ONLY
ctx.mem_idx = MIPS_HFLAG_UM;
@ -19996,6 +20054,7 @@ void cpu_state_reset(CPUMIPSState *env)
env->CP0_Random = env->tlb->nb_tlb - 1;
env->tlb->tlb_in_use = env->tlb->nb_tlb;
env->CP0_Wired = 0;
env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId;
env->CP0_EBase = (cs->cpu_index & 0x3FF);
if (kvm_enabled()) {
env->CP0_EBase |= 0x40000000;

View File

@ -665,7 +665,8 @@ static const mips_def_t mips_defs[] =
(1 << CP0C3_RXI) | (1 << CP0C3_LPA),
.CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) |
(0xfc << CP0C4_KScrExist),
.CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB),
.CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) |
(1 << CP0C5_LLB),
.CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) |
(1 << CP0C5_FRE) | (1 << CP0C5_UFE),
.CP0_LLAddr_rw_bitmask = 0,