MIPS: KVM: Reformat code and comments

No logic changes inside.

Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Deng-Cheng Zhu 2014-06-26 12:11:34 -07:00, committed by Paolo Bonzini
parent 85949977a5
commit d116e812f9
15 changed files with 564 additions and 552 deletions

arch/mips/include/asm/kvm_host.h

@@ -359,13 +359,17 @@ enum emulation_result {
 #define MIPS3_PG_FRAME		0x3fffffc0
 #define VPN2_MASK		0xffffe000
 #define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&	\
				 ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))	\
				 ? ((x).tlb_lo1 & MIPS3_PG_V)	\
				 : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
+				 ((y) & VPN2_MASK & ~(x).tlb_mask))
+#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
+				 TLB_ASID(x) == ((y) & ASID_MASK))

 struct kvm_mips_tlb {
 	long tlb_mask;
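For context, beyond the diff itself: the two new TLB_HI_*_HIT macros fold the masked-VPN2 comparison and the ASID/global check into single predicates. A minimal sketch of how a guest-TLB probe can combine them (modeled loosely on kvm_mips_guest_tlb_lookup; the function name and the -1 miss return here are illustrative, not taken from this commit):

#include <linux/kvm_host.h>	/* struct kvm_vcpu, struct kvm_mips_tlb, macros */

/* Illustrative probe: scan the software-managed guest TLB for an entry
 * whose masked VPN2 matches 'entryhi' and whose ASID matches (or which
 * is marked global). Returns the matching index, or -1 on a miss. */
static int guest_tlb_probe(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];

		if (TLB_HI_VPN2_HIT(*tlb, entryhi) &&
		    TLB_HI_ASID_HIT(*tlb, entryhi))
			return i;
	}
	return -1;
}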

arch/mips/include/asm/r4kcache.h

@@ -19,6 +19,9 @@
 #include <asm/mipsmtregs.h>
 #include <asm/uaccess.h> /* for segment_eq() */

+extern void (*r4k_blast_dcache)(void);
+extern void (*r4k_blast_icache)(void);
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations. Two issues here:

arch/mips/kvm/kvm_locore.S

@@ -16,7 +16,6 @@
 #include <asm/stackframe.h>
 #include <asm/asm-offsets.h>

-
 #define _C_LABEL(x)	x
 #define MIPSX(name)	mips32_ ## name
 #define CALLFRAME_SIZ	32
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run)
 	LONG_S	$24, PT_R24(k1)
 	LONG_S	$25, PT_R25(k1)

-	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+	/*
+	 * XXXKYMA k0/k1 not saved, not being used if we got here through
+	 * an ioctl()
+	 */
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run)
 	/* Save the kernel gp as well */
 	LONG_S	gp, VCPU_HOST_GP(k1)

-	/* Setup status register for running the guest in UM, interrupts are disabled */
+	/*
+	 * Setup status register for running the guest in UM, interrupts
+	 * are disabled
+	 */
 	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
 	mtc0	k0, CP0_STATUS
 	ehb
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run)
 	mtc0	k0, CP0_STATUS
 	ehb

-
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
@@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid)
 	INT_ADDIU	t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
 	INT_ADDIU	t1, k1, VCPU_GUEST_USER_ASID	/* else user */
 1:
-	/* t1: contains the base of the ASID array, need to get the cpu id  */
+	/* t1: contains the base of the ASID array, need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
 	INT_SLL	t2, t2, 2		/* x4 */
 	REG_ADDU	t3, t1, t2
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 	eret

 VECTOR(MIPSX(exception), unknown)
-/*
- * Find out what mode we came from and jump to the proper handler.
- */
+/* Find out what mode we came from and jump to the proper handler. */
 	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
 	ehb				#02:
@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown)
 	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
 	INT_SLL	k0, k0, 10		#04
 	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
-	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is installed @ offset 0x2000
+	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
+					#    installed @ offset 0x2000
 	j	k0			#07: jump to the function
 	nop				#08: branch delay slot
 VECTOR_END(MIPSX(exceptionEnd))
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd))
 /*
  * Generic Guest exception handler. We end up here when the guest
  * does something that causes a trap to kernel mode.
- *
  */
 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Get the VCPU pointer from DDTATA_LO */
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)

-	/* We need to save hi/lo and restore them on
-	 * the way out
-	 */
+	/* We need to save hi/lo and restore them on the way out */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Save pointer to run in s0, will be saved by the compiler */
 	move	s0, a0

-	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
-	 * process the exception */
+	/*
+	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception
+	 */
 	mfc0	k0,CP0_EPC
 	LONG_S	k0, VCPU_PC(k1)
@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_L	k0, VCPU_HOST_EBASE(k1)
 	mtc0	k0,CP0_EBASE

 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
-
 	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Saved host state */
 	INT_ADDIU	sp, sp, -PT_SIZE

-	/* XXXKYMA do we need to load the host ASID, maybe not because the
+	/*
+	 * XXXKYMA do we need to load the host ASID, maybe not because the
 	 * kernel entries are marked GLOBAL, need to verify
 	 */
@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-	/* XXXKYMA: not sure if this is safe, how large is the stack??
+	/*
+	 * XXXKYMA: not sure if this is safe, how large is the stack??
 	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
-	 * with this in the kernel */
+	 * with this in the kernel
+	 */
 	PTR_LA	t9, kvm_mips_handle_exit
 	jalr.hb	t9
 	INT_ADDIU	sp, sp, -CALLFRAME_SIZ	/* BD Slot */
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
 	di
 	ehb

-	/* XXXKYMA: k0/k1 could have been blown away if we processed
+	/*
+	 * XXXKYMA: k0/k1 could have been blown away if we processed
 	 * an exception while we were handling the exception from the
 	 * guest, reload k1
 	 */
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
 	move	k1, s1
 	INT_ADDIU	k1, k1, VCPU_HOST_ARCH

-	/* Check return value, should tell us if we are returning to the
+	/*
+	 * Check return value, should tell us if we are returning to the
 	 * host (handle I/O etc)or resuming the guest
 	 */
 	andi	t0, v0, RESUME_HOST
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host:
 	LONG_L	$0, PT_R0(k1)
 	LONG_L	$1, PT_R1(k1)

-	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
-	 * to recover the err code */
+	/*
+	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code
+	 */
 	INT_SRA	k0, v0, 2
 	move	$2, k0
@@ -566,7 +574,6 @@ __kvm_mips_return_to_host:
 	PTR_LI	k0, 0x2000000F
 	mtc0	k0, CP0_HWRENA

 	/* Restore RA, which is the address we will return to */
-
 	LONG_L	ra, PT_R31(k1)
 	j	ra

arch/mips/kvm/kvm_mips.c

@@ -7,7 +7,7 @@
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */

 #include <linux/errno.h>
 #include <linux/err.h>
@@ -31,38 +31,41 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif

-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits) },
-	{ "cache", VCPU_STAT(cache_exits) },
-	{ "signal", VCPU_STAT(signal_exits) },
-	{ "interrupt", VCPU_STAT(int_exits) },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
-	{ "syscall", VCPU_STAT(syscall_exits) },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
-	{ "break_inst", VCPU_STAT(break_inst_exits) },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
+	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
+	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
+	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
+	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
+	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
+	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
 	{NULL}
 };

 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
+
 	for_each_possible_cpu(i) {
 		vcpu->arch.guest_kernel_asid[i] = 0;
 		vcpu->arch.guest_user_asid[i] = 0;
 	}
+
 	return 0;
 }

-/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
- * are "runnable" if interrupts are pending
+/*
+ * XXXKYMA: We are simulatoring a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
 */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
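For context, beyond the diff: the old VCPU_STAT macro expanded to two initializer elements at once (the offset plus a KVM_STAT_VCPU hidden behind the trailing comma), which the reformatted table now spells out at each use site. A self-contained sketch with stand-in types (the struct layouts below are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types, for illustration only. */
struct vcpu_stat { unsigned long wait_exits, cache_exits; };
struct kvm_vcpu { struct vcpu_stat stat; };
enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU };

struct kvm_stats_debugfs_item {
	const char *name;
	size_t offset;
	enum kvm_stat_kind kind;
};

/* New-style macro: offset only; the stat kind is written per entry. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ NULL }
};

int main(void)
{
	int i;

	for (i = 0; debugfs_entries[i].name; i++)
		printf("%s at offset %zu\n", debugfs_entries[i].name,
		       debugfs_entries[i].offset);
	return 0;
}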
@@ -103,7 +106,10 @@ static void kvm_mips_init_tlbs(struct kvm *kvm)
 {
 	unsigned long wired;

-	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+	/*
+	 * Add a wired entry to the TLB, it is used to map the commpage to
+	 * the Guest kernel
+	 */
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
 	mtc0_tlbw_hazard();
@@ -130,7 +136,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
 	}

-
 	return 0;
 }
@@ -185,8 +190,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	}
 }

-long
-kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+			unsigned long arg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -207,17 +212,17 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
 }

 int kvm_arch_prepare_memory_region(struct kvm *kvm,
-				struct kvm_memory_slot *memslot,
-				struct kvm_userspace_memory_region *mem,
-				enum kvm_mr_change change)
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return 0;
 }

 void kvm_arch_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
-				const struct kvm_memory_slot *old,
-				enum kvm_mr_change change)
+				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 	unsigned long npages = 0;
 	int i, err = 0;
@@ -246,9 +251,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 					npages, kvm->arch.guest_pmap);

 			/* Now setup the page table */
-			for (i = 0; i < npages; i++) {
+			for (i = 0; i < npages; i++)
 				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-			}
 		}
 	}
 out:
@@ -270,8 +274,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)

 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	extern char mips32_exception[], mips32_exceptionEnd[];
-	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 	int err, size, offset;
 	void *gebase;
 	int i;
@@ -290,14 +292,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

-	/* Allocate space for host mode exception handlers that handle
+	/*
+	 * Allocate space for host mode exception handlers that handle
 	 * guest mode exits
 	 */
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_veic || cpu_has_vint)
 		size = 0x200 + VECTORSPACING * 64;
-	} else {
+	else
 		size = 0x4000;
-	}

 	/* Save Linux EBASE */
 	vcpu->arch.host_ebase = (void *)read_c0_ebase();
@@ -345,7 +347,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	local_flush_icache_range((unsigned long)gebase,
 				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

-	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+	/*
+	 * Allocate comm page for guest kernel, a TLB will be reserved for
+	 * mapping GVA @ 0xFFFF8000 to this page
+	 */
 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

 	if (!vcpu->arch.kseg0_commpage) {
@@ -391,9 +396,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_free(vcpu);
 }

-int
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-				    struct kvm_guest_debug *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -430,8 +434,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return r;
 }

-int
-kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
 	struct kvm_vcpu *dvcpu = NULL;
@@ -458,23 +462,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
 	dvcpu->arch.wait = 0;

-	if (waitqueue_active(&dvcpu->wq)) {
+	if (waitqueue_active(&dvcpu->wq))
 		wake_up_interruptible(&dvcpu->wq);
-	}

 	return 0;
 }

-int
-kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }

-int
-kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
@@ -631,10 +632,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	}
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
 		return put_user(v, uaddr64);
 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 		u32 v32 = (u32)v;
+
 		return put_user(v32, uaddr32);
 	} else {
 		return -EINVAL;
@@ -727,8 +730,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	return 0;
 }

-long
-kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+			 unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
@@ -738,6 +741,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
 		struct kvm_one_reg reg;
+
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 		if (ioctl == KVM_SET_ONE_REG)
@@ -772,6 +776,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_INTERRUPT:
 		{
 			struct kvm_mips_interrupt irq;
+
 			r = -EFAULT;
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 				goto out;
@@ -790,9 +795,7 @@ out:
 	return r;
 }

-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
+/* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
@@ -859,14 +862,14 @@ void kvm_arch_exit(void)
 	kvm_mips_callbacks = NULL;
 }

-int
-kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }

-int
-kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
@@ -979,14 +982,11 @@ static void kvm_mips_comparecount_func(unsigned long data)
 	kvm_mips_callbacks->queue_timer_int(vcpu);

 	vcpu->arch.wait = 0;
-	if (waitqueue_active(&vcpu->wq)) {
+	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
-	}
 }

-/*
- * low level hrtimer wake routine.
- */
+/* low level hrtimer wake routine */
 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
@@ -1010,8 +1010,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	return;
 }

-int
-kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
 {
 	return 0;
 }
@@ -1022,8 +1022,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return kvm_mips_callbacks->vcpu_setup(vcpu);
 }

-static
-void kvm_mips_set_c0_status(void)
+static void kvm_mips_set_c0_status(void)
 {
 	uint32_t status = read_c0_status();
@@ -1053,7 +1052,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;

-	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+	/*
+	 * Set the appropriate status bits based on host CPU features,
+	 * before we hit the scheduler
+	 */
 	kvm_mips_set_c0_status();

 	local_irq_enable();
@@ -1061,7 +1063,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
 		  cause, opc, run, vcpu);

-	/* Do a privilege check, if in UM most of these exit conditions end up
+	/*
+	 * Do a privilege check, if in UM most of these exit conditions end up
 	 * causing an exception to be delivered to the Guest Kernel
 	 */
 	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1080,9 +1083,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		++vcpu->stat.int_exits;
 		trace_kvm_exit(vcpu, INT_EXITS);

-		if (need_resched()) {
+		if (need_resched())
 			cond_resched();
-		}

 		ret = RESUME_GUEST;
 		break;
@@ -1094,9 +1096,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
 		/* XXXKYMA: Might need to return to user space */
-		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
 			ret = RESUME_HOST;
-		}
 		break;

 	case T_TLB_MOD:
@@ -1106,10 +1107,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;

 	case T_TLB_ST_MISS:
-		kvm_debug
-		    ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-		     badvaddr);
+		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  badvaddr);

 		++vcpu->stat.tlbmiss_st_exits;
 		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
@@ -1156,10 +1156,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;

 	default:
-		kvm_err
-		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
-		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-		     kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1174,7 +1173,7 @@ skip_emul:
 	kvm_mips_deliver_interrupts(vcpu, cause);

 	if (!(ret & RESUME_HOST)) {
 		/* Only check for signals if not already exiting to userspace */
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			ret = (-EINTR << 2) | RESUME_HOST;
@@ -1195,11 +1194,13 @@ int __init kvm_mips_init(void)
 	if (ret)
 		return ret;

-	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
-	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
-	 * to avoid the possibility of double faulting. The issue is that the TLB code
-	 * references routines that are part of the the KVM module,
-	 * which are only available once the module is loaded.
+	/*
+	 * On MIPS, kernel modules are executed from "mapped space", which
+	 * requires TLBs. The TLB handling code is statically linked with
+	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
+	 * double faulting. The issue is that the TLB code references
+	 * routines that are part of the the KVM module, which are only
+	 * available once the module is loaded.
 	 */
 	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;

arch/mips/kvm/kvm_mips_comm.h

@@ -1,19 +1,20 @@
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: commpage: mapped into get kernel space
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */

 #ifndef __KVM_MIPS_COMMPAGE_H__
 #define __KVM_MIPS_COMMPAGE_H__

 struct kvm_mips_commpage {
-	struct mips_coproc cop0;	/* COP0 state is mapped into Guest kernel via commpage */
+	/* COP0 state is mapped into Guest kernel via commpage */
+	struct mips_coproc cop0;
 };

 #define KVM_MIPS_COMM_EIDI_OFFSET	0x0

arch/mips/kvm/kvm_mips_commpage.c

@@ -1,14 +1,14 @@
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * commpage, currently used for Virtual COP0 registers.
 * Mapped into the guest kernel @ 0x0.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */

 #include <linux/errno.h>
 #include <linux/err.h>
@@ -27,6 +27,7 @@
 void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
 	memset(page, 0, sizeof(struct kvm_mips_commpage));

 	/* Specific init values for fields */

arch/mips/kvm/kvm_mips_dyntrans.c

@@ -1,13 +1,13 @@
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */

 #include <linux/errno.h>
 #include <linux/err.h>
@@ -28,9 +28,8 @@
 #define CLEAR_TEMPLATE	0x00000020
 #define SW_TEMPLATE	0xac000000

-int
-kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-			   struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 }

 /*
- * Address based CACHE instructions are transformed into synci(s). A little heavy
- * for just D-cache invalidates, but avoids an expensive trap
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
 */
-int
-kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-			struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+			    struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
 	return result;
 }

-int
-kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mfc0_inst;
@@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 	return 0;
 }

-int
-kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mtc0_inst = SW_TEMPLATE;

arch/mips/kvm/kvm_mips_emul.c

@@ -1,13 +1,13 @@
 /*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */

 #include <linux/errno.h>
 #include <linux/err.h>
@@ -51,18 +51,14 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 	if (epc & 3)
 		goto unaligned;

-	/*
-	 * Read the instruction
-	 */
+	/* Read the instruction */
 	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
 	if (insn.word == KVM_INVALID_INST)
 		return KVM_INVALID_INST;

 	switch (insn.i_format.opcode) {
-		/*
-		 * jr and jalr are in r_format format.
-		 */
+	/* jr and jalr are in r_format format. */
 	case spec_op:
 		switch (insn.r_format.func) {
 		case jalr_op:
@@ -124,18 +120,16 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 			dspcontrol = rddsp(0x01);

-			if (dspcontrol >= 32) {
+			if (dspcontrol >= 32)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-			} else
+			else
 				epc += 8;
 			nextpc = epc;
 			break;
 		}
 		break;

-		/*
-		 * These are unconditional and in j_format.
-		 */
+	/* These are unconditional and in j_format. */
 	case jal_op:
 		arch->gprs[31] = instpc + 8;
 	case j_op:
@@ -146,9 +140,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		nextpc = epc;
 		break;

-		/*
-		 * These are conditional and in i_format.
-		 */
+	/* These are conditional and in i_format. */
 	case beq_op:
 	case beql_op:
 		if (arch->gprs[insn.i_format.rs] ==
@@ -189,9 +181,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		nextpc = epc;
 		break;

-		/*
-		 * And now the FPA/cp1 branch instructions.
-		 */
+	/* And now the FPA/cp1 branch instructions. */
 	case cop1_op:
 		printk("%s: unsupported cop1_op\n", __func__);
 		break;
@@ -219,7 +209,8 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 			er = EMULATE_FAIL;
 		} else {
 			vcpu->arch.pc = branch_pc;
-			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+			kvm_debug("BD update_pc(): New PC: %#lx\n",
+				  vcpu->arch.pc);
 		}
 	} else
 		vcpu->arch.pc += 4;
@@ -240,6 +231,7 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
 	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 }
@@ -392,7 +384,6 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
 	return now;
 }

-
 /**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
@@ -781,8 +772,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		vcpu->arch.wait = 1;
 		kvm_vcpu_block(vcpu);

-		/* We we are runnable, then definitely go off to user space to check if any
-		 * I/O interrupts are pending.
+		/*
+		 * We we are runnable, then definitely go off to user space to
+		 * check if any I/O interrupts are pending.
 		 */
 		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
@@ -793,8 +785,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 	return er;
 }

-/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
- * this, if things ever change
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch this, if things ever change
 */
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
@@ -827,21 +820,22 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	}

 	tlb = &vcpu->arch.guest_tlb[index];
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif

 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

-	kvm_debug
-	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
-	     kvm_read_c0_guest_pagemask(cop0));
+	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0),
+		  kvm_read_c0_guest_pagemask(cop0));

 	return er;
 }
@@ -855,12 +849,8 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 	uint32_t pc = vcpu->arch.pc;
 	int index;

-#if 1
 	get_random_bytes(&index, sizeof(index));
 	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-#else
-	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
-#endif

 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
 		printk("%s: illegal index: %d\n", __func__, index);
@@ -869,21 +859,21 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)

 	tlb = &vcpu->arch.guest_tlb[index];

-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif

 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

-	kvm_debug
-	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0),
-	     kvm_read_c0_guest_entrylo1(cop0));
+	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0));

 	return er;
 }
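For context, beyond the diff: the TLBWR emulation keeps only the random-replacement variant (the jiffies-based #else branch is deleted), and its masking trick is worth spelling out — because the guest TLB size is a power of two, ANDing with (size - 1) reduces arbitrary random bytes to a valid index. A small standalone sketch (the size constant and rand() stand in for the kernel's KVM_MIPS_GUEST_TLB_SIZE and get_random_bytes()):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define GUEST_TLB_SIZE 64	/* stand-in; must be a power of two */

int main(void)
{
	int index;

	srand((unsigned)time(NULL));
	/* (size - 1) is an all-ones bit mask when size is a power of
	 * two, so the masked result always lands in 0 .. size-1. */
	index = rand();
	index &= (GUEST_TLB_SIZE - 1);
	printf("replacing guest TLB entry %d\n", index);
	return 0;
}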
@ -906,9 +896,9 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, uint32_t cause, struct kvm_run *run,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
@ -922,9 +912,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
*/ */
curr_pc = vcpu->arch.pc; curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause); er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL) { if (er == EMULATE_FAIL)
return er; return er;
}
copz = (inst >> 21) & 0x1f; copz = (inst >> 21) & 0x1f;
rt = (inst >> 16) & 0x1f; rt = (inst >> 16) & 0x1f;
@ -973,8 +962,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
#ifdef CONFIG_KVM_MIPS_DYN_TRANS #ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu); kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif #endif
} } else {
else {
vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS #ifdef CONFIG_KVM_MIPS_DYN_TRANS
@ -1014,17 +1002,15 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
kvm_read_c0_guest_ebase(cop0)); kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) { } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
uint32_t nasid = uint32_t nasid =
vcpu->arch.gprs[rt] & ASID_MASK; vcpu->arch.gprs[rt] & ASID_MASK;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
&&
((kvm_read_c0_guest_entryhi(cop0) & ((kvm_read_c0_guest_entryhi(cop0) &
ASID_MASK) != nasid)) { ASID_MASK) != nasid)) {
kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
kvm_debug kvm_read_c0_guest_entryhi(cop0)
("MTCz, change ASID from %#lx to %#lx\n", & ASID_MASK,
kvm_read_c0_guest_entryhi(cop0) & vcpu->arch.gprs[rt]
ASID_MASK, & ASID_MASK);
vcpu->arch.gprs[rt] & ASID_MASK);
/* Blow away the shadow host TLBs */ /* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1); kvm_mips_flush_host_tlb(1);
@ -1049,7 +1035,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
kvm_write_c0_guest_status(cop0, kvm_write_c0_guest_status(cop0,
vcpu->arch.gprs[rt]); vcpu->arch.gprs[rt]);
/* Make sure that CU1 and NMI bits are never set */ /*
* Make sure that CU1 and NMI bits are
* never set
*/
kvm_clear_c0_guest_status(cop0, kvm_clear_c0_guest_status(cop0,
(ST0_CU1 | ST0_NMI)); (ST0_CU1 | ST0_NMI));
@ -1058,6 +1047,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
#endif #endif
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
uint32_t old_cause, new_cause; uint32_t old_cause, new_cause;
old_cause = kvm_read_c0_guest_cause(cop0); old_cause = kvm_read_c0_guest_cause(cop0);
new_cause = vcpu->arch.gprs[rt]; new_cause = vcpu->arch.gprs[rt];
/* Update R/W bits */ /* Update R/W bits */
@ -1115,7 +1105,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
cop0->reg[MIPS_CP0_STATUS][2] & 0xf; cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
uint32_t pss = uint32_t pss =
(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ /*
* We don't support any shadow register sets, so
* SRSCtl[PSS] == SRSCtl[CSS] = 0
*/
if (css || pss) { if (css || pss) {
er = EMULATE_FAIL; er = EMULATE_FAIL;
break; break;
@ -1135,12 +1128,9 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
} }
done: done:
/* /* Rollback PC only if emulation was unsuccessful */
* Rollback PC only if emulation was unsuccessful if (er == EMULATE_FAIL)
*/
if (er == EMULATE_FAIL) {
vcpu->arch.pc = curr_pc; vcpu->arch.pc = curr_pc;
}
dont_update_pc: dont_update_pc:
/* /*
@ -1152,9 +1142,9 @@ dont_update_pc:
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
kvm_mips_emulate_store(uint32_t inst, uint32_t cause, struct kvm_run *run,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DO_MMIO; enum emulation_result er = EMULATE_DO_MMIO;
int32_t op, base, rt, offset; int32_t op, base, rt, offset;
@ -1257,19 +1247,16 @@ kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
break; break;
} }
/* /* Rollback PC if emulation was unsuccessful */
* Rollback PC if emulation was unsuccessful if (er == EMULATE_FAIL)
*/
if (er == EMULATE_FAIL) {
vcpu->arch.pc = curr_pc; vcpu->arch.pc = curr_pc;
}
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
kvm_mips_emulate_load(uint32_t inst, uint32_t cause, struct kvm_run *run,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DO_MMIO; enum emulation_result er = EMULATE_DO_MMIO;
int32_t op, base, rt, offset; int32_t op, base, rt, offset;
@ -1410,13 +1397,12 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
#define MIPS_CACHE_DCACHE 0x1 #define MIPS_CACHE_DCACHE 0x1
#define MIPS_CACHE_SEC 0x3 #define MIPS_CACHE_SEC 0x3
enum emulation_result enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, uint32_t cause,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
extern void (*r4k_blast_dcache) (void);
extern void (*r4k_blast_icache) (void);
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int32_t offset, cache, op_inst, op, base; int32_t offset, cache, op_inst, op, base;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1443,14 +1429,15 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset); cache, op, base, arch->gprs[base], offset);
/* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate /*
* the caches entirely by stepping through all the ways/indexes * Treat INDEX_INV as a nop, basically issued by Linux on startup to
* invalidate the caches entirely by stepping through all the
* ways/indexes
*/ */
if (op == MIPS_CACHE_OP_INDEX_INV) { if (op == MIPS_CACHE_OP_INDEX_INV) {
kvm_debug kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], offset);
arch->gprs[base], offset);
if (cache == MIPS_CACHE_DCACHE) if (cache == MIPS_CACHE_DCACHE)
r4k_blast_dcache(); r4k_blast_dcache();
@ -1470,21 +1457,19 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
preempt_disable(); preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
kvm_mips_handle_kseg0_tlb_fault(va, vcpu); kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
}
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
int index; int index;
/* If an entry already exists then skip */ /* If an entry already exists then skip */
if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
goto skip_fault; goto skip_fault;
}
/* If address not in the guest TLB, then give the guest a fault, the /*
* resulting handler will do the right thing * If address not in the guest TLB, then give the guest a fault,
* the resulting handler will do the right thing
*/ */
index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
(kvm_read_c0_guest_entryhi (kvm_read_c0_guest_entryhi
@ -1499,14 +1484,20 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
goto dont_update_pc; goto dont_update_pc;
} else { } else {
struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ /*
* Check if the entry is valid, if not then setup a TLB
* invalid exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) { if (!TLB_IS_VALID(*tlb, va)) {
er = kvm_mips_emulate_tlbinv_ld(cause, NULL, er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
run, vcpu); run, vcpu);
preempt_enable(); preempt_enable();
goto dont_update_pc; goto dont_update_pc;
} else { } else {
/* We fault an entry from the guest tlb to the shadow host TLB */ /*
* We fault an entry from the guest tlb to the
* shadow host TLB
*/
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
NULL, NULL,
NULL); NULL);
@ -1530,7 +1521,10 @@ skip_fault:
flush_dcache_line(va); flush_dcache_line(va);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS #ifdef CONFIG_KVM_MIPS_DYN_TRANS
/* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ /*
* Replace the CACHE instruction, with a SYNCI, not the same,
* but avoids a trap
*/
kvm_mips_trans_cache_va(inst, opc, vcpu); kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif #endif
} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
@ -1552,28 +1546,23 @@ skip_fault:
preempt_enable(); preempt_enable();
dont_update_pc: dont_update_pc:
/* /* Rollback PC */
* Rollback PC
*/
vcpu->arch.pc = curr_pc; vcpu->arch.pc = curr_pc;
done: done:
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, struct kvm_run *run,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
uint32_t inst; uint32_t inst;
/* /* Fetch the instruction. */
* Fetch the instruction. if (cause & CAUSEF_BD)
*/
if (cause & CAUSEF_BD) {
opc += 1; opc += 1;
}
inst = kvm_get_inst(opc, vcpu); inst = kvm_get_inst(opc, vcpu);
@ -1611,9 +1600,10 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1645,9 +1635,10 @@ kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1691,9 +1682,10 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1737,9 +1729,10 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1781,9 +1774,10 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1826,9 +1820,9 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
} }
/* TLBMOD: store into address matching TLB with Dirty bit off */ /* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, struct kvm_run *run,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG #ifdef DEBUG
@ -1837,9 +1831,7 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
int index; int index;
/* /* If address not in the guest TLB, then we are in trouble */
* If address not in the guest TLB, then we are in trouble
*/
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
if (index < 0) { if (index < 0) {
/* XXXKYMA Invalidate and retry */ /* XXXKYMA Invalidate and retry */
@ -1856,9 +1848,10 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
@ -1898,9 +1891,10 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1927,9 +1921,10 @@ kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1961,9 +1956,10 @@ kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -1995,9 +1991,7 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
return er; return er;
} }
/* /* ll/sc, rdhwr, sync emulation */
* ll/sc, rdhwr, sync emulation
*/
#define OPCODE 0xfc000000 #define OPCODE 0xfc000000
#define BASE 0x03e00000 #define BASE 0x03e00000
@ -2012,9 +2006,9 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
#define SYNC 0x0000000f #define SYNC 0x0000000f
#define RDHWR 0x0000003b #define RDHWR 0x0000003b
enum emulation_result enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, struct kvm_run *run,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch; struct kvm_vcpu_arch *arch = &vcpu->arch;
@ -2031,9 +2025,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
if (er == EMULATE_FAIL) if (er == EMULATE_FAIL)
return er; return er;
/* /* Fetch the instruction. */
* Fetch the instruction.
*/
if (cause & CAUSEF_BD) if (cause & CAUSEF_BD)
opc += 1; opc += 1;
@ -2099,8 +2091,8 @@ emulate_ri:
return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
} }
enum emulation_result enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) struct kvm_run *run)
{ {
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
@ -2142,18 +2134,18 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
} }
if (vcpu->arch.pending_load_cause & CAUSEF_BD) if (vcpu->arch.pending_load_cause & CAUSEF_BD)
kvm_debug kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, vcpu->mmio_needed);
vcpu->mmio_needed);
done: done:
return er; return er;
} }
static enum emulation_result static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
@ -2188,9 +2180,10 @@ kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
return er; return er;
} }
enum emulation_result enum emulation_result kvm_mips_check_privilege(unsigned long cause,
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@ -2215,7 +2208,10 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
break; break;
case T_TLB_LD_MISS: case T_TLB_LD_MISS:
/* If we are accessing Guest kernel space, then send an address error exception to the guest */ /*
 * If we are accessing Guest kernel space, then send an
 * address error exception to the guest
 */
if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
printk("%s: LD MISS @ %#lx\n", __func__, printk("%s: LD MISS @ %#lx\n", __func__,
badvaddr); badvaddr);
@ -2226,7 +2222,10 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
break; break;
case T_TLB_ST_MISS: case T_TLB_ST_MISS:
/* If we are accessing Guest kernel space, then send an address error exception to the guest */ /*
 * If we are accessing Guest kernel space, then send an
 * address error exception to the guest
 */
if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
printk("%s: ST MISS @ %#lx\n", __func__, printk("%s: ST MISS @ %#lx\n", __func__,
badvaddr); badvaddr);
@ -2260,21 +2259,23 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
} }
} }
if (er == EMULATE_PRIV_FAIL) { if (er == EMULATE_PRIV_FAIL)
kvm_mips_emulate_exc(cause, opc, run, vcpu); kvm_mips_emulate_exc(cause, opc, run, vcpu);
}
return er; return er;
} }
/* User Address (UA) fault, this could happen if /*
* User Address (UA) fault, this could happen if
* (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
* case we pass on the fault to the guest kernel and let it handle it. * case we pass on the fault to the guest kernel and let it handle it.
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
* case we inject the TLB from the Guest TLB into the shadow host TLB * case we inject the TLB from the Guest TLB into the shadow host TLB
*/ */
enum emulation_result enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, uint32_t *opc,
struct kvm_run *run, struct kvm_vcpu *vcpu) struct kvm_run *run,
struct kvm_vcpu *vcpu)
{ {
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@ -2284,10 +2285,11 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
/* KVM would not have got the exception if this entry was valid in the shadow host TLB /*
* Check the Guest TLB, if the entry is not there then send the guest an * KVM would not have got the exception if this entry was valid in the
* exception. The guest exc handler should then inject an entry into the * shadow host TLB. Check the Guest TLB, if the entry is not there then
* guest TLB * send the guest an exception. The guest exc handler should then inject
* an entry into the guest TLB.
*/ */
index = kvm_mips_guest_tlb_lookup(vcpu, index = kvm_mips_guest_tlb_lookup(vcpu,
(va & VPN2_MASK) | (va & VPN2_MASK) |
@ -2305,7 +2307,10 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
} else { } else {
struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ /*
* Check if the entry is valid, if not then setup a TLB invalid
* exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) { if (!TLB_IS_VALID(*tlb, va)) {
if (exccode == T_TLB_LD_MISS) { if (exccode == T_TLB_LD_MISS) {
er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
@ -2319,10 +2324,12 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
er = EMULATE_FAIL; er = EMULATE_FAIL;
} }
} else { } else {
kvm_debug kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); /*
/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ * OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
NULL); NULL);
} }
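
The reflowed comments above amount to a three-way decision. As a condensed
sketch of that flow under this file's includes (the wrapper name
tlbmiss_flow_sketch is hypothetical; the helpers, macros and fields are the
ones visible in this diff, and the TLB-miss branch is elided to a comment):

static enum emulation_result tlbmiss_flow_sketch(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	unsigned long entryhi = (va & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK);
	struct kvm_mips_tlb *tlb;
	int index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	if (index < 0)
		/* (1) No guest mapping at all: reflect a TLB miss to the
		 * guest kernel and let its refill handler create one. */
		return EMULATE_DONE;

	tlb = &vcpu->arch.guest_tlb[index];
	if (!TLB_IS_VALID(*tlb, va))
		/* Entry present but V bit clear: raise TLB invalid. */
		return kvm_mips_emulate_tlbinv_ld(cause, opc, run, vcpu);

	/* (2) Valid guest entry: inject it into the shadow host TLB. */
	kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL);
	return EMULATE_DONE;
}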


@ -1,13 +1,13 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* KVM/MIPS: Interrupt delivery * KVM/MIPS: Interrupt delivery
* *
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/ */
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/err.h> #include <linux/err.h>
@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{ {
/* Cause bits to reflect the pending timer interrupt, /*
* Cause bits to reflect the pending timer interrupt,
* the EXC code will be set when we are actually * the EXC code will be set when we are actually
* delivering the interrupt: * delivering the interrupt:
*/ */
@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
} }
void void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) struct kvm_mips_interrupt *irq)
{ {
int intr = (int)irq->irq; int intr = (int)irq->irq;
/* Cause bits to reflect the pending IO interrupt, /*
* Cause bits to reflect the pending IO interrupt,
* the EXC code will be set when we are actually * the EXC code will be set when we are actually
* delivering the interrupt: * delivering the interrupt:
*/ */
@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
} }
void void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
struct kvm_mips_interrupt *irq)
{ {
int intr = (int)irq->irq; int intr = (int)irq->irq;
switch (intr) { switch (intr) {
case -2: case -2:
kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
} }
/* Deliver the interrupt of the corresponding priority, if possible. */ /* Deliver the interrupt of the corresponding priority, if possible. */
int int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, uint32_t cause)
uint32_t cause)
{ {
int allowed = 0; int allowed = 0;
uint32_t exccode; uint32_t exccode;
@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
/* Are we allowed to deliver the interrupt ??? */ /* Are we allowed to deliver the interrupt ??? */
if (allowed) { if (allowed) {
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */ /* save old pc */
kvm_write_c0_guest_epc(cop0, arch->pc); kvm_write_c0_guest_epc(cop0, arch->pc);
@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
return allowed; return allowed;
} }
int int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, uint32_t cause)
uint32_t cause)
{ {
return 1; return 1;
} }
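
The callbacks above keep queueing and delivery strictly separate: queueing
only records the interrupt in the guest's Cause register plus a priority,
while EPC and the EXC code are written at delivery time. A minimal sketch of
both halves, assuming this file's includes and the set-side counterparts of
the accessors shown here (kvm_set_c0_guest_cause, kvm_set_c0_guest_status
and kvm_mips_queue_irq are assumptions, mirroring the clear/dequeue calls in
this diff):

static void sketch_queue_timer_int(struct kvm_vcpu *vcpu)
{
	/* Record the interrupt: raise Cause.TI immediately ... */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, C_TI);
	/* ... but only remember its priority; the EXC code is not
	 * written here. */
	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void sketch_deliver(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, vcpu->arch.pc); /* save old pc */
		kvm_set_c0_guest_status(cop0, ST0_EXL);
		/* only now set the EXC code in guest Cause and point the
		 * guest at its exception vector */
	}
}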


@ -1,14 +1,15 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* KVM/MIPS: Interrupts * KVM/MIPS: Interrupts
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/ */
/* MIPS Exception Priorities, exceptions (including interrupts) are queued up /*
* MIPS Exception Priorities, exceptions (including interrupts) are queued up
* for the guest in the order specified by their priorities * for the guest in the order specified by their priorities
*/ */
@ -27,6 +28,9 @@
#define MIPS_EXC_MAX 12 #define MIPS_EXC_MAX 12
/* XXXSL More to follow */ /* XXXSL More to follow */
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
#define C_TI (_ULCAST_(1) << 30) #define C_TI (_ULCAST_(1) << 30)
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)


@ -1,24 +1,22 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
/*
 * Define opcode values not defined in <asm/inst.h>
 */ */
/* Define opcode values not defined in <asm/inst.h> */
#ifndef __KVM_MIPS_OPCODE_H__ #ifndef __KVM_MIPS_OPCODE_H__
#define __KVM_MIPS_OPCODE_H__ #define __KVM_MIPS_OPCODE_H__
/* COP0 Ops */ /* COP0 Ops */
#define mfmcz_op 0x0b /* 01011 */ #define mfmcz_op 0x0b /* 01011 */
#define wrpgpr_op 0x0e /* 01110 */ #define wrpgpr_op 0x0e /* 01110 */
/* COP0 opcodes (only if COP0 and CO=1): */ /* COP0 opcodes (only if COP0 and CO=1): */
#define wait_op 0x20 /* 100000 */ #define wait_op 0x20 /* 100000 */
#endif /* __KVM_MIPS_OPCODE_H__ */ #endif /* __KVM_MIPS_OPCODE_H__ */


@ -1,13 +1,13 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* KVM/MIPS: COP0 access histogram * KVM/MIPS: COP0 access histogram
* *
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/ */
#include <linux/kvm_host.h> #include <linux/kvm_host.h>


@ -1,14 +1,14 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
* TLB handlers run from KSEG0 * TLB handlers run from KSEG0
* *
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/ */
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/smp.h> #include <linux/smp.h>
@ -18,7 +18,6 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/srcu.h> #include <linux/srcu.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
@ -39,13 +38,13 @@ atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance); EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */ /* These function pointers are initialized once the KVM module is loaded */
pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
void (*kvm_mips_release_pfn_clean) (pfn_t pfn); void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean); EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
bool(*kvm_mips_is_error_pfn) (pfn_t pfn); bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn); EXPORT_SYMBOL(kvm_mips_is_error_pfn);
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
@ -53,21 +52,17 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
} }
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{ {
return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
} }
inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{ {
return vcpu->kvm->arch.commpage_tlb; return vcpu->kvm->arch.commpage_tlb;
} }
/* Structure defining a TLB entry data set. */
/*
 * Structure defining a TLB entry data set.
 */
void kvm_mips_dump_host_tlbs(void) void kvm_mips_dump_host_tlbs(void)
{ {
@ -116,6 +111,7 @@ void kvm_mips_dump_host_tlbs(void)
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{ {
@ -143,6 +139,7 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
} }
} }
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{ {
@ -152,7 +149,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
return 0; return 0;
srcu_idx = srcu_read_lock(&kvm->srcu); srcu_idx = srcu_read_lock(&kvm->srcu);
pfn = kvm_mips_gfn_to_pfn(kvm, gfn); pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
if (kvm_mips_is_error_pfn(pfn)) { if (kvm_mips_is_error_pfn(pfn)) {
@ -169,7 +166,7 @@ out:
/* Translate guest KSEG0 addresses to Host PA */ /* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
unsigned long gva) unsigned long gva)
{ {
gfn_t gfn; gfn_t gfn;
uint32_t offset = gva & ~PAGE_MASK; uint32_t offset = gva & ~PAGE_MASK;
@ -194,12 +191,13 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
} }
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
/* XXXKYMA: Must be called with interrupts disabled */ /* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */ /* set flush_dcache_mask == 0 if no dcache flush required */
int int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, unsigned long entrylo0, unsigned long entrylo1,
unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) int flush_dcache_mask)
{ {
unsigned long flags; unsigned long flags;
unsigned long old_entryhi; unsigned long old_entryhi;
@ -207,7 +205,6 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
local_irq_save(flags); local_irq_save(flags);
old_entryhi = read_c0_entryhi(); old_entryhi = read_c0_entryhi();
write_c0_entryhi(entryhi); write_c0_entryhi(entryhi);
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
@ -240,12 +237,14 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
if (flush_dcache_mask) { if (flush_dcache_mask) {
if (entrylo0 & MIPS3_PG_V) { if (entrylo0 & MIPS3_PG_V) {
++vcpu->stat.flush_dcache_exits; ++vcpu->stat.flush_dcache_exits;
flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); flush_data_cache_page((entryhi & VPN2_MASK) &
~flush_dcache_mask);
} }
if (entrylo1 & MIPS3_PG_V) { if (entrylo1 & MIPS3_PG_V) {
++vcpu->stat.flush_dcache_exits; ++vcpu->stat.flush_dcache_exits;
flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | flush_data_cache_page(((entryhi & VPN2_MASK) &
(0x1 << PAGE_SHIFT)); ~flush_dcache_mask) |
(0x1 << PAGE_SHIFT));
} }
} }
@ -257,10 +256,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
return 0; return 0;
} }
/* XXXKYMA: Must be called with interrupts disabled */ /* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
{ {
gfn_t gfn; gfn_t gfn;
pfn_t pfn0, pfn1; pfn_t pfn0, pfn1;
@ -270,7 +268,6 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
const int flush_dcache_mask = 0; const int flush_dcache_mask = 0;
if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
kvm_mips_dump_host_tlbs(); kvm_mips_dump_host_tlbs();
@ -302,14 +299,15 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
} }
entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
(0x1 << 1); (1 << 2) | (0x1 << 1);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
(0x1 << 1); (1 << 2) | (0x1 << 1);
return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
flush_dcache_mask); flush_dcache_mask);
} }
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
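
The literal bits OR'd into entrylo0/entrylo1 above are the standard MIPS32
EntryLo fields. Spelled out as a sketch macro (SKETCH_ENTRYLO is
hypothetical; the field meanings are architectural):

/*
 *   (0x3 << 3)  C field (bits 5:3) = 3: cacheable, noncoherent
 *   (1 << 2)    D bit: page writable ("dirty")
 *   (0x1 << 1)  V bit: entry valid
 */
#define SKETCH_ENTRYLO(pfn)					\
	(mips3_paddr_to_tlbpfn((pfn) << PAGE_SHIFT) |		\
	 (0x3 << 3) | (1 << 2) | (0x1 << 1))

With it, the pair above would read entrylo0 = SKETCH_ENTRYLO(pfn0) and
entrylo1 = SKETCH_ENTRYLO(pfn1).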
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu) struct kvm_vcpu *vcpu)
@ -318,11 +316,10 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
unsigned long flags, old_entryhi = 0, vaddr = 0; unsigned long flags, old_entryhi = 0, vaddr = 0;
unsigned long entrylo0 = 0, entrylo1 = 0; unsigned long entrylo0 = 0, entrylo1 = 0;
pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
pfn1 = 0; pfn1 = 0;
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
(0x1 << 1); (1 << 2) | (0x1 << 1);
entrylo1 = 0; entrylo1 = 0;
local_irq_save(flags); local_irq_save(flags);
@ -341,9 +338,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
tlbw_use_hazard(); tlbw_use_hazard();
kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
read_c0_entrylo0(), read_c0_entrylo1()); read_c0_entrylo0(), read_c0_entrylo1());
/* Restore old ASID */ /* Restore old ASID */
write_c0_entryhi(old_entryhi); write_c0_entryhi(old_entryhi);
@ -353,28 +350,33 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
return 0; return 0;
} }
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
int int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, struct kvm_mips_tlb *tlb,
struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) unsigned long *hpa0,
unsigned long *hpa1)
{ {
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
pfn_t pfn0, pfn1; pfn_t pfn0, pfn1;
if ((tlb->tlb_hi & VPN2_MASK) == 0) { if ((tlb->tlb_hi & VPN2_MASK) == 0) {
pfn0 = 0; pfn0 = 0;
pfn1 = 0; pfn1 = 0;
} else { } else {
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
>> PAGE_SHIFT) < 0)
return -1; return -1;
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
>> PAGE_SHIFT) < 0)
return -1; return -1;
pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; >> PAGE_SHIFT];
pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
>> PAGE_SHIFT];
} }
if (hpa0) if (hpa0)
@ -385,11 +387,12 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
/* Get attributes from the Guest TLB */ /* Get attributes from the Guest TLB */
entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); kvm_mips_get_kernel_asid(vcpu) :
kvm_mips_get_user_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo0, tlb->tlb_lo1); tlb->tlb_lo0, tlb->tlb_lo1);
@ -397,6 +400,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
tlb->tlb_mask); tlb->tlb_mask);
} }
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{ {
@ -404,10 +408,9 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
int index = -1; int index = -1;
struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
(TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { TLB_HI_ASID_HIT(tlb[i], entryhi)) {
index = i; index = i;
break; break;
} }
@ -418,21 +421,23 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
return index; return index;
} }
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
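
Callers throughout this commit probe kvm_mips_guest_tlb_lookup() with a
synthetic EntryHi built from the faulting address and the guest's current
ASID. A sketch of that caller pattern (the wrapper name is hypothetical):

static int sketch_probe_guest_tlb(struct kvm_vcpu *vcpu, unsigned long gva)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (gva & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	/* Returns the matching index or -1 on a guest TLB miss; a global
	 * entry matches whatever ASID entryhi carries. */
	return kvm_mips_guest_tlb_lookup(vcpu, entryhi);
}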
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{ {
unsigned long old_entryhi, flags; unsigned long old_entryhi, flags;
volatile int idx; volatile int idx;
local_irq_save(flags); local_irq_save(flags);
old_entryhi = read_c0_entryhi(); old_entryhi = read_c0_entryhi();
if (KVM_GUEST_KERNEL_MODE(vcpu)) if (KVM_GUEST_KERNEL_MODE(vcpu))
write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); write_c0_entryhi((vaddr & VPN2_MASK) |
kvm_mips_get_kernel_asid(vcpu));
else { else {
write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); write_c0_entryhi((vaddr & VPN2_MASK) |
kvm_mips_get_user_asid(vcpu));
} }
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
@ -452,6 +457,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
return idx; return idx;
} }
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{ {
@ -460,7 +466,6 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
local_irq_save(flags); local_irq_save(flags);
old_entryhi = read_c0_entryhi(); old_entryhi = read_c0_entryhi();
write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
@ -499,8 +504,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
return 0; return 0;
} }
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ /* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{ {
unsigned long flags, old_entryhi; unsigned long flags, old_entryhi;
@ -510,7 +516,6 @@ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
local_irq_save(flags); local_irq_save(flags);
old_entryhi = read_c0_entryhi(); old_entryhi = read_c0_entryhi();
write_c0_entryhi(UNIQUE_ENTRYHI(index)); write_c0_entryhi(UNIQUE_ENTRYHI(index));
@ -546,7 +551,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
int entry = 0; int entry = 0;
int maxentry = current_cpu_data.tlbsize; int maxentry = current_cpu_data.tlbsize;
local_irq_save(flags); local_irq_save(flags);
old_entryhi = read_c0_entryhi(); old_entryhi = read_c0_entryhi();
@ -554,7 +558,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
/* Blast 'em all away. */ /* Blast 'em all away. */
for (entry = 0; entry < maxentry; entry++) { for (entry = 0; entry < maxentry; entry++) {
write_c0_index(entry); write_c0_index(entry);
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
@ -565,9 +568,8 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
entryhi = read_c0_entryhi(); entryhi = read_c0_entryhi();
/* Don't blow away guest kernel entries */ /* Don't blow away guest kernel entries */
if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
continue; continue;
}
} }
/* Make sure all entries differ. */ /* Make sure all entries differ. */
@ -591,17 +593,17 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
void void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, struct kvm_vcpu *vcpu)
struct kvm_vcpu *vcpu)
{ {
unsigned long asid = asid_cache(cpu); unsigned long asid = asid_cache(cpu);
if (!((asid += ASID_INC) & ASID_MASK)) { asid += ASID_INC;
if (cpu_has_vtag_icache) { if (!(asid & ASID_MASK)) {
if (cpu_has_vtag_icache)
flush_icache_all(); flush_icache_all();
}
kvm_local_flush_tlb_all(); /* start new asid cycle */ kvm_local_flush_tlb_all(); /* start new asid cycle */
@ -639,6 +641,7 @@ void kvm_local_flush_tlb_all(void)
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
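
The rollover test in kvm_get_new_mmu_context() above works because a live
ASID occupies only the low ASID_MASK bits of the cpu's asid_cache. A sketch
of that invariant (sketch_next_asid is hypothetical):

static unsigned long sketch_next_asid(unsigned long asid)
{
	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		/* The low bits wrapped to zero: every previously issued
		 * ASID may now collide, so flush the host TLB and start
		 * a new cycle. */
		kvm_local_flush_tlb_all();
	}
	return asid;
}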
/** /**
* kvm_mips_migrate_count() - Migrate timer. * kvm_mips_migrate_count() - Migrate timer.
@ -699,7 +702,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
} }
if (!newasid) { if (!newasid) {
/* If we preempted while the guest was executing, then reload the pre-empted ASID */ /*
* If we preempted while the guest was executing, then reload
* the pre-empted ASID
*/
if (current->flags & PF_VCPU) { if (current->flags & PF_VCPU) {
write_c0_entryhi(vcpu->arch. write_c0_entryhi(vcpu->arch.
preempt_entryhi & ASID_MASK); preempt_entryhi & ASID_MASK);
@ -708,9 +714,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
} else { } else {
/* New ASIDs were allocated for the VM */ /* New ASIDs were allocated for the VM */
/* Were we in guest context? If so then the pre-empted ASID is no longer /*
* valid, we need to set it to what it should be based on the mode of * Were we in guest context? If so then the pre-empted ASID is
* the Guest (Kernel/User) * no longer valid, we need to set it to what it should be based
* on the mode of the Guest (Kernel/User)
*/ */
if (current->flags & PF_VCPU) { if (current->flags & PF_VCPU) {
if (KVM_GUEST_KERNEL_MODE(vcpu)) if (KVM_GUEST_KERNEL_MODE(vcpu))
@ -728,6 +735,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(kvm_arch_vcpu_load);
/* ASID can change if another task is scheduled during preemption */ /* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@ -739,7 +747,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
cpu = smp_processor_id(); cpu = smp_processor_id();
vcpu->arch.preempt_entryhi = read_c0_entryhi(); vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu; vcpu->arch.last_sched_cpu = cpu;
@ -754,11 +761,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(kvm_arch_vcpu_put);
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{ {
struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long paddr, flags; unsigned long paddr, flags, vpn2, asid;
uint32_t inst; uint32_t inst;
int index; int index;
@ -769,16 +777,12 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
if (index >= 0) { if (index >= 0) {
inst = *(opc); inst = *(opc);
} else { } else {
index = vpn2 = (unsigned long) opc & VPN2_MASK;
kvm_mips_guest_tlb_lookup(vcpu, asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
((unsigned long) opc & VPN2_MASK) index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
|
(kvm_read_c0_guest_entryhi
(cop0) & ASID_MASK));
if (index < 0) { if (index < 0) {
kvm_err kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", __func__, opc, vcpu, read_c0_entryhi());
__func__, opc, vcpu, read_c0_entryhi());
kvm_mips_dump_host_tlbs(); kvm_mips_dump_host_tlbs();
local_irq_restore(flags); local_irq_restore(flags);
return KVM_INVALID_INST; return KVM_INVALID_INST;
@ -793,7 +797,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
paddr = paddr =
kvm_mips_translate_guest_kseg0_to_hpa(vcpu, kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
(unsigned long) opc); (unsigned long) opc);
inst = *(uint32_t *) CKSEG0ADDR(paddr); inst = *(uint32_t *) CKSEG0ADDR(paddr);
} else { } else {
kvm_err("%s: illegal address: %p\n", __func__, opc); kvm_err("%s: illegal address: %p\n", __func__, opc);
@ -802,18 +806,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
return inst; return inst;
} }
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst); EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);


@ -1,13 +1,13 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
* *
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/ */
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/err.h> #include <linux/err.h>
@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
return gpa; return gpa;
} }
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DONE; enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST; int ret = RESUME_GUEST;
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
} else else
er = kvm_mips_emulate_inst(cause, opc, run, vcpu); er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
switch (er) { switch (er) {
@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
kvm_debug kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr);
cause, opc, badvaddr);
er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
if (er == EMULATE_DONE) if (er == EMULATE_DONE)
@ -95,8 +93,10 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
ret = RESUME_HOST; ret = RESUME_HOST;
} }
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
/* XXXKYMA: The guest kernel does not expect to get this fault when we are not /*
* using HIGHMEM. Need to address this in a HIGHMEM kernel * XXXKYMA: The guest kernel does not expect to get this fault
* when we are not using HIGHMEM. Need to address this in a
* HIGHMEM kernel
*/ */
printk printk
("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
@ -134,9 +134,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
} }
} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
kvm_debug kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr);
cause, opc, badvaddr);
er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
if (er == EMULATE_DONE) if (er == EMULATE_DONE)
ret = RESUME_GUEST; ret = RESUME_GUEST;
@ -145,8 +144,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
ret = RESUME_HOST; ret = RESUME_HOST;
} }
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
/* All KSEG0 faults are handled by KVM, as the guest kernel does not /*
* expect to ever get them * All KSEG0 faults are handled by KVM, as the guest kernel does
* not expect to ever get them
*/ */
if (kvm_mips_handle_kseg0_tlb_fault if (kvm_mips_handle_kseg0_tlb_fault
(vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
@ -154,9 +154,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
ret = RESUME_HOST; ret = RESUME_HOST;
} }
} else { } else {
kvm_err kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr);
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs(); kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu); kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@ -185,11 +184,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
vcpu->arch.pc, badvaddr); vcpu->arch.pc, badvaddr);
/* User Address (UA) fault, this could happen if /*
* (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this * User Address (UA) fault, this could happen if
* case we pass on the fault to the guest kernel and let it handle it. * (1) TLB entry not present/valid in both Guest and shadow host
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this * TLBs, in this case we pass on the fault to the guest
* case we inject the TLB from the Guest TLB into the shadow host TLB * kernel and let it handle it.
* (2) TLB entry is present in the Guest TLB but not in the
* shadow, in this case we inject the TLB from the Guest TLB
* into the shadow host TLB
*/ */
er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
@ -349,9 +351,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
uint32_t config1; uint32_t config1;
int vcpu_id = vcpu->vcpu_id; int vcpu_id = vcpu->vcpu_id;
/* Arch specific stuff, set up config registers properly so that the /*
* guest will come up as expected, for now we simulate a * Arch specific stuff, set up config registers properly so that the
* MIPS 24kc * guest will come up as expected, for now we simulate a MIPS 24kc
*/ */
kvm_write_c0_guest_prid(cop0, 0x00019300); kvm_write_c0_guest_prid(cop0, 0x00019300);
kvm_write_c0_guest_config(cop0, kvm_write_c0_guest_config(cop0,
@ -373,14 +375,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
kvm_write_c0_guest_config3(cop0, kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << (1 << CP0C3_ULRI));
CP0C3_ULRI));
/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
/* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ /*
 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
 */
kvm_write_c0_guest_intctl(cop0, 0xFC000000); kvm_write_c0_guest_intctl(cop0, 0xFC000000);
/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */


@ -1,11 +1,11 @@
/* /*
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com> * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/ */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H #define _TRACE_KVM_H
@ -17,9 +17,7 @@
#define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace #define TRACE_INCLUDE_FILE trace
/* /* Tracepoints for VM exits */
 * Tracepoints for VM exits
 */
extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
TRACE_EVENT(kvm_exit, TRACE_EVENT(kvm_exit,