Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more kvm updates from Paolo Bonzini:
 "Mostly the PPC part of the release, but also switching to Arnd's fix
  for the hyperv config issue and a typo fix.

  Main PPC changes:

   - reimplement the MMIO instruction emulation

   - transactional memory support for PR KVM

   - improve radix page table handling"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (63 commits)
  KVM: x86: VMX: redo fix for link error without CONFIG_HYPERV
  KVM: x86: fix typo at kvm_arch_hardware_setup comment
  KVM: PPC: Book3S PR: Fix failure status setting in tabort. emulation
  KVM: PPC: Book3S PR: Enable use on POWER9 bare-metal hosts in HPT mode
  KVM: PPC: Book3S PR: Don't let PAPR guest set MSR hypervisor bit
  KVM: PPC: Book3S PR: Fix failure status setting in treclaim. emulation
  KVM: PPC: Book3S PR: Fix MSR setting when delivering interrupts
  KVM: PPC: Book3S PR: Handle additional interrupt types
  KVM: PPC: Book3S PR: Enable kvmppc_get/set_one_reg_pr() for HTM registers
  KVM: PPC: Book3S: Remove load/put vcpu for KVM_GET_REGS/KVM_SET_REGS
  KVM: PPC: Remove load/put vcpu for KVM_GET/SET_ONE_REG ioctl
  KVM: PPC: Move vcpu_load/vcpu_put down to each ioctl case in kvm_arch_vcpu_ioctl
  KVM: PPC: Book3S PR: Enable HTM for PR KVM for KVM_CHECK_EXTENSION ioctl
  KVM: PPC: Book3S PR: Support TAR handling for PR KVM HTM
  KVM: PPC: Book3S PR: Add guard code to prevent returning to guest with PR=0 and Transactional state
  KVM: PPC: Book3S PR: Add emulation for tabort. in privileged state
  KVM: PPC: Book3S PR: Add emulation for trechkpt.
  KVM: PPC: Book3S PR: Add emulation for treclaim.
  KVM: PPC: Book3S PR: Restore NV regs after emulating mfspr from TM SPRs
  KVM: PPC: Book3S PR: Always fail transactions in guest privileged state
  ...
commit 8949170cf4
@@ -134,7 +134,13 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 void pnv_power9_force_smt4_catch(void);
 void pnv_power9_force_smt4_release(void);
 
+/* Transaction memory related */
 void tm_enable(void);
 void tm_disable(void);
 void tm_abort(uint8_t cause);
+
+struct kvm_vcpu;
+void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
@@ -104,6 +104,7 @@ struct kvmppc_vcore {
 	ulong vtb;		/* virtual timebase */
 	ulong conferring_threads;
 	unsigned int halt_poll_ns;
+	atomic_t online_count;
 };
 
 struct kvmppc_vcpu_book3s {
@@ -209,6 +210,7 @@ extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
 extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
+extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
@@ -256,6 +258,21 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
+#endif
+
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -274,12 +291,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	vcpu->arch.regs.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	return vcpu->arch.regs.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -294,42 +311,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.xer = val;
+	vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return vcpu->arch.regs.xer;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.ctr = val;
+	vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.ctr;
+	return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.lr = val;
+	vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.lr;
+	return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.pc = val;
+	vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.pc;
+	return vcpu->arch.regs.nip;
 }
 
 static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
@@ -483,15 +483,15 @@ static inline u64 sanitize_msr(u64 msr)
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.cr = vcpu->arch.cr_tm;
-	vcpu->arch.xer = vcpu->arch.xer_tm;
-	vcpu->arch.lr = vcpu->arch.lr_tm;
-	vcpu->arch.ctr = vcpu->arch.ctr_tm;
+	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+	vcpu->arch.regs.link = vcpu->arch.lr_tm;
+	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
 	vcpu->arch.amr = vcpu->arch.amr_tm;
 	vcpu->arch.ppr = vcpu->arch.ppr_tm;
 	vcpu->arch.dscr = vcpu->arch.dscr_tm;
 	vcpu->arch.tar = vcpu->arch.tar_tm;
-	memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
-	       sizeof(vcpu->arch.gpr));
+	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
+	       sizeof(vcpu->arch.regs.gpr));
 	vcpu->arch.fp = vcpu->arch.fp_tm;
 	vcpu->arch.vr = vcpu->arch.vr_tm;
 	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
@@ -500,15 +500,15 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.cr_tm = vcpu->arch.cr;
-	vcpu->arch.xer_tm = vcpu->arch.xer;
-	vcpu->arch.lr_tm = vcpu->arch.lr;
-	vcpu->arch.ctr_tm = vcpu->arch.ctr;
+	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+	vcpu->arch.lr_tm = vcpu->arch.regs.link;
+	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
 	vcpu->arch.amr_tm = vcpu->arch.amr;
 	vcpu->arch.ppr_tm = vcpu->arch.ppr;
 	vcpu->arch.dscr_tm = vcpu->arch.dscr;
 	vcpu->arch.tar_tm = vcpu->arch.tar;
-	memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
-	       sizeof(vcpu->arch.gpr));
+	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
+	       sizeof(vcpu->arch.regs.gpr));
 	vcpu->arch.fp_tm = vcpu->arch.fp;
 	vcpu->arch.vr_tm = vcpu->arch.vr;
 	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
@@ -36,12 +36,12 @@
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	vcpu->arch.regs.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	return vcpu->arch.regs.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.xer = val;
+	vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return vcpu->arch.regs.xer;
 }
 
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.ctr = val;
+	vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.ctr;
+	return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.lr = val;
+	vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.lr;
+	return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	vcpu->arch.pc = val;
+	vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.pc;
+	return vcpu->arch.regs.nip;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
@@ -269,7 +269,6 @@ struct kvm_arch {
 	unsigned long host_lpcr;
 	unsigned long sdr1;
 	unsigned long host_sdr1;
-	int tlbie_lock;
 	unsigned long lpcr;
 	unsigned long vrma_slb_v;
 	int mmu_ready;
@@ -454,6 +453,12 @@ struct mmio_hpte_cache {
 #define KVMPPC_VSX_COPY_WORD		1
 #define KVMPPC_VSX_COPY_DWORD		2
 #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP	3
+#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP	4
+
+#define KVMPPC_VMX_COPY_BYTE		8
+#define KVMPPC_VMX_COPY_HWORD		9
+#define KVMPPC_VMX_COPY_WORD		10
+#define KVMPPC_VMX_COPY_DWORD		11
 
 struct openpic;
 
@@ -486,7 +491,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
-	ulong gpr[32];
+	struct pt_regs regs;
 
 	struct thread_fp_state fp;
 
@@ -521,14 +526,10 @@ struct kvm_vcpu_arch {
 	u32 qpr[32];
 #endif
 
-	ulong pc;
-	ulong ctr;
-	ulong lr;
 #ifdef CONFIG_PPC_BOOK3S
 	ulong tar;
 #endif
 
-	ulong xer;
 	u32 cr;
 
 #ifdef CONFIG_PPC_BOOK3S
@@ -626,7 +627,6 @@ struct kvm_vcpu_arch {
 
 	struct thread_vr_state vr_tm;
 	u32 vrsave_tm; /* also USPRG0 */
-
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
@@ -681,16 +681,17 @@ struct kvm_vcpu_arch {
 	 * Number of simulations for vsx.
	 * If we use 2*8bytes to simulate 1*16bytes,
	 * then the number should be 2 and
-	 * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
+	 * mmio_copy_type=KVMPPC_VSX_COPY_DWORD.
	 * If we use 4*4bytes to simulate 1*16bytes,
	 * the number should be 4 and
	 * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
	 */
 	u8 mmio_vsx_copy_nums;
 	u8 mmio_vsx_offset;
-	u8 mmio_vsx_copy_type;
 	u8 mmio_vsx_tx_sx_enabled;
 	u8 mmio_vmx_copy_nums;
+	u8 mmio_vmx_offset;
+	u8 mmio_copy_type;
 	u8 osi_needed;
 	u8 osi_enabled;
 	u8 papr_enabled;
@@ -772,6 +773,8 @@ struct kvm_vcpu_arch {
 	u64 busy_preempt;
 
 	u32 emul_inst;
+
+	u32 online;
 #endif
 
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
@@ -52,7 +52,7 @@ enum emulation_result {
 	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
 };
 
-enum instruction_type {
+enum instruction_fetch_type {
 	INST_GENERIC,
 	INST_SC,		/* system call */
 };
@@ -81,10 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
-		struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
-extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
-		struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
+extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, unsigned int bytes, int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
@@ -93,7 +93,7 @@ extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       int is_default_endian);
 
 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-				 enum instruction_type type, u32 *inst);
+				 enum instruction_fetch_type type, u32 *inst);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
@@ -265,6 +265,8 @@ union kvmppc_one_reg {
 	vector128 vval;
 	u64	vsxval[2];
 	u32	vsx32val[4];
+	u16	vsx16val[8];
+	u8	vsx8val[16];
 	struct {
 		u64	addr;
 		u64	length;
@@ -324,13 +326,14 @@ struct kvmppc_ops {
 	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
 	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
+	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
-					enum instruction_type type, u32 *inst)
+					enum instruction_fetch_type type, u32 *inst)
 {
 	int ret = EMULATE_DONE;
 	u32 fetched_inst;
@@ -385,6 +385,7 @@
 #define SPRN_PSSCR	0x357	/* Processor Stop Status and Control Register (ISA 3.0) */
 #define SPRN_PSSCR_PR	0x337	/* PSSCR ISA 3.0, privileged mode access */
 #define SPRN_PMCR	0x374	/* Power Management Control Register */
+#define SPRN_RWMR	0x375	/* Region-Weighting Mode Register */
 
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_SCV_LG	12	/* Enable System Call Vectored */
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_PSSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
 #define KVM_REG_PPC_DEC_EXPIRY	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
@@ -426,20 +426,20 @@ int main(void)
 	OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
 	OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
 	OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
-	OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
+	OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
 	OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
 	OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
 #ifdef CONFIG_ALTIVEC
 	OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
 #endif
-	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
+	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
 #ifdef CONFIG_PPC_BOOK3S
 	OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
 	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
 	OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
@@ -696,10 +696,10 @@ int main(void)
 
 #else /* CONFIG_PPC_BOOK3S */
 	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
-	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
+	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 	OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
 	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
 	OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
@@ -63,6 +63,9 @@ kvm-pr-y := \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
 
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+	tm.o
+
 ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_rmhandlers.o
@@ -134,7 +134,7 @@ kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	kvmppc_unfixup_split_real(vcpu);
 	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
+	kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
 	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
@ -256,18 +256,15 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
|
||||||
{
|
{
|
||||||
kvmppc_set_dar(vcpu, dar);
|
kvmppc_set_dar(vcpu, dar);
|
||||||
kvmppc_set_dsisr(vcpu, flags);
|
kvmppc_set_dsisr(vcpu, flags);
|
||||||
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
|
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); /* used by kvm_hv */
|
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
|
||||||
|
|
||||||
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
|
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
|
||||||
{
|
{
|
||||||
u64 msr = kvmppc_get_msr(vcpu);
|
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
|
||||||
msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
|
|
||||||
msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
|
|
||||||
kvmppc_set_msr_fast(vcpu, msr);
|
|
||||||
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
|
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
|
||||||
|
|
||||||
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
|
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
|
||||||
unsigned int priority)
|
unsigned int priority)
|
||||||
|
@ -450,8 +447,8 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
|
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
|
||||||
u32 *inst)
|
enum instruction_fetch_type type, u32 *inst)
|
||||||
{
|
{
|
||||||
ulong pc = kvmppc_get_pc(vcpu);
|
ulong pc = kvmppc_get_pc(vcpu);
|
||||||
int r;
|
int r;
|
||||||
|
@ -509,8 +506,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
vcpu_load(vcpu);
|
|
||||||
|
|
||||||
regs->pc = kvmppc_get_pc(vcpu);
|
regs->pc = kvmppc_get_pc(vcpu);
|
||||||
regs->cr = kvmppc_get_cr(vcpu);
|
regs->cr = kvmppc_get_cr(vcpu);
|
||||||
regs->ctr = kvmppc_get_ctr(vcpu);
|
regs->ctr = kvmppc_get_ctr(vcpu);
|
||||||
|
@ -532,7 +527,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||||
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
|
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
|
||||||
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
|
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
|
||||||
|
|
||||||
vcpu_put(vcpu);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -540,8 +534,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
vcpu_load(vcpu);
|
|
||||||
|
|
||||||
kvmppc_set_pc(vcpu, regs->pc);
|
kvmppc_set_pc(vcpu, regs->pc);
|
||||||
kvmppc_set_cr(vcpu, regs->cr);
|
kvmppc_set_cr(vcpu, regs->cr);
|
||||||
kvmppc_set_ctr(vcpu, regs->ctr);
|
kvmppc_set_ctr(vcpu, regs->ctr);
|
||||||
|
@ -562,7 +554,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||||
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
|
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
|
||||||
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
|
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
|
||||||
|
|
||||||
vcpu_put(vcpu);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -31,4 +31,10 @@ extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
|
||||||
extern int kvmppc_book3s_init_pr(void);
|
extern int kvmppc_book3s_init_pr(void);
|
||||||
extern void kvmppc_book3s_exit_pr(void);
|
extern void kvmppc_book3s_exit_pr(void);
|
||||||
|
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
|
||||||
|
#else
|
||||||
|
static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -52,7 +52,7 @@
|
||||||
static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
|
static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
#ifdef DEBUG_MMU_PTE_IP
|
#ifdef DEBUG_MMU_PTE_IP
|
||||||
return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
|
return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
|
||||||
#else
|
#else
|
||||||
return true;
|
return true;
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -38,7 +38,16 @@
|
||||||
|
|
||||||
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
|
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
|
unsigned long msr = vcpu->arch.intr_msr;
|
||||||
|
unsigned long cur_msr = kvmppc_get_msr(vcpu);
|
||||||
|
|
||||||
|
/* If transactional, change to suspend mode on IRQ delivery */
|
||||||
|
if (MSR_TM_TRANSACTIONAL(cur_msr))
|
||||||
|
msr |= MSR_TS_S;
|
||||||
|
else
|
||||||
|
msr |= cur_msr & MSR_TS_MASK;
|
||||||
|
|
||||||
|
kvmppc_set_msr(vcpu, msr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
|
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
|
||||||
|
|
|
@ -272,6 +272,9 @@ int kvmppc_mmu_hv_init(void)
|
||||||
if (!cpu_has_feature(CPU_FTR_HVMODE))
|
if (!cpu_has_feature(CPU_FTR_HVMODE))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
|
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
|
||||||
host_lpid = mfspr(SPRN_LPID);
|
host_lpid = mfspr(SPRN_LPID);
|
||||||
rsvd_lpid = LPID_RSVD;
|
rsvd_lpid = LPID_RSVD;
|
||||||
|
|
|
@ -139,44 +139,24 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_64K_PAGES
|
|
||||||
#define MMU_BASE_PSIZE MMU_PAGE_64K
|
|
||||||
#else
|
|
||||||
#define MMU_BASE_PSIZE MMU_PAGE_4K
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
|
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
|
||||||
unsigned int pshift)
|
unsigned int pshift)
|
||||||
{
|
{
|
||||||
int psize = MMU_BASE_PSIZE;
|
unsigned long psize = PAGE_SIZE;
|
||||||
|
|
||||||
if (pshift >= PUD_SHIFT)
|
if (pshift)
|
||||||
psize = MMU_PAGE_1G;
|
psize = 1UL << pshift;
|
||||||
else if (pshift >= PMD_SHIFT)
|
|
||||||
psize = MMU_PAGE_2M;
|
addr &= ~(psize - 1);
|
||||||
addr &= ~0xfffUL;
|
radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
|
||||||
addr |= mmu_psize_defs[psize].ap << 5;
|
|
||||||
asm volatile("ptesync": : :"memory");
|
|
||||||
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
|
|
||||||
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
|
|
||||||
if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
|
|
||||||
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
|
|
||||||
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
|
|
||||||
asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
|
static void kvmppc_radix_flush_pwc(struct kvm *kvm)
|
||||||
{
|
{
|
||||||
unsigned long rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
|
radix__flush_pwc_lpid(kvm->arch.lpid);
|
||||||
|
|
||||||
asm volatile("ptesync": : :"memory");
|
|
||||||
/* RIC=1 PRS=0 R=1 IS=2 */
|
|
||||||
asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
|
|
||||||
: : "r" (rb), "r" (kvm->arch.lpid) : "memory");
|
|
||||||
asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
|
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
|
||||||
unsigned long clr, unsigned long set,
|
unsigned long clr, unsigned long set,
|
||||||
unsigned long addr, unsigned int shift)
|
unsigned long addr, unsigned int shift)
|
||||||
{
|
{
|
||||||
|
@ -228,6 +208,167 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
|
||||||
kmem_cache_free(kvm_pmd_cache, pmdp);
|
kmem_cache_free(kvm_pmd_cache, pmdp);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
|
||||||
|
unsigned long gpa, unsigned int shift)
|
||||||
|
|
||||||
|
{
|
||||||
|
unsigned long page_size = 1ul << shift;
|
||||||
|
unsigned long old;
|
||||||
|
|
||||||
|
old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
|
||||||
|
kvmppc_radix_tlbie_page(kvm, gpa, shift);
|
||||||
|
if (old & _PAGE_DIRTY) {
|
||||||
|
unsigned long gfn = gpa >> PAGE_SHIFT;
|
||||||
|
struct kvm_memory_slot *memslot;
|
||||||
|
|
||||||
|
memslot = gfn_to_memslot(kvm, gfn);
|
||||||
|
if (memslot && memslot->dirty_bitmap)
|
||||||
|
kvmppc_update_dirty_map(memslot, gfn, page_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* kvmppc_free_p?d are used to free existing page tables, and recursively
|
||||||
|
* descend and clear and free children.
|
||||||
|
* Callers are responsible for flushing the PWC.
|
||||||
|
*
|
||||||
|
* When page tables are being unmapped/freed as part of page fault path
|
||||||
|
* (full == false), ptes are not expected. There is code to unmap them
|
||||||
|
* and emit a warning if encountered, but there may already be data
|
||||||
|
* corruption due to the unexpected mappings.
|
||||||
|
*/
|
||||||
|
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
|
||||||
|
{
|
||||||
|
if (full) {
|
||||||
|
memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
|
||||||
|
} else {
|
||||||
|
pte_t *p = pte;
|
||||||
|
unsigned long it;
|
||||||
|
|
||||||
|
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
|
||||||
|
if (pte_val(*p) == 0)
|
||||||
|
continue;
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
kvmppc_unmap_pte(kvm, p,
|
||||||
|
pte_pfn(*p) << PAGE_SHIFT,
|
||||||
|
PAGE_SHIFT);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
kvmppc_pte_free(pte);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
|
||||||
|
{
|
||||||
|
unsigned long im;
|
||||||
|
pmd_t *p = pmd;
|
||||||
|
|
||||||
|
for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
|
||||||
|
if (!pmd_present(*p))
|
||||||
|
continue;
|
||||||
|
if (pmd_is_leaf(*p)) {
|
||||||
|
if (full) {
|
||||||
|
pmd_clear(p);
|
||||||
|
} else {
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
kvmppc_unmap_pte(kvm, (pte_t *)p,
|
||||||
|
pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
|
||||||
|
PMD_SHIFT);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pte_t *pte;
|
||||||
|
|
||||||
|
pte = pte_offset_map(p, 0);
|
||||||
|
kvmppc_unmap_free_pte(kvm, pte, full);
|
||||||
|
pmd_clear(p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kvmppc_pmd_free(pmd);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
|
||||||
|
{
|
||||||
|
unsigned long iu;
|
||||||
|
pud_t *p = pud;
|
||||||
|
|
||||||
|
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
|
||||||
|
if (!pud_present(*p))
|
||||||
|
continue;
|
||||||
|
if (pud_huge(*p)) {
|
||||||
|
pud_clear(p);
|
||||||
|
} else {
|
||||||
|
pmd_t *pmd;
|
||||||
|
|
||||||
|
pmd = pmd_offset(p, 0);
|
||||||
|
kvmppc_unmap_free_pmd(kvm, pmd, true);
|
||||||
|
pud_clear(p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pud_free(kvm->mm, pud);
|
||||||
|
}
|
||||||
|
|
||||||
|
void kvmppc_free_radix(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
unsigned long ig;
|
||||||
|
pgd_t *pgd;
|
||||||
|
|
||||||
|
if (!kvm->arch.pgtable)
|
||||||
|
return;
|
||||||
|
pgd = kvm->arch.pgtable;
|
||||||
|
for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
|
||||||
|
pud_t *pud;
|
||||||
|
|
||||||
|
if (!pgd_present(*pgd))
|
||||||
|
continue;
|
||||||
|
pud = pud_offset(pgd, 0);
|
||||||
|
kvmppc_unmap_free_pud(kvm, pud);
|
||||||
|
pgd_clear(pgd);
|
||||||
|
}
|
||||||
|
pgd_free(kvm->mm, kvm->arch.pgtable);
|
||||||
|
kvm->arch.pgtable = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
|
||||||
|
unsigned long gpa)
|
||||||
|
{
|
||||||
|
pte_t *pte = pte_offset_kernel(pmd, 0);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Clearing the pmd entry then flushing the PWC ensures that the pte
|
||||||
|
* page no longer be cached by the MMU, so can be freed without
|
||||||
|
* flushing the PWC again.
|
||||||
|
*/
|
||||||
|
pmd_clear(pmd);
|
||||||
|
kvmppc_radix_flush_pwc(kvm);
|
||||||
|
|
||||||
|
kvmppc_unmap_free_pte(kvm, pte, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
|
||||||
|
unsigned long gpa)
|
||||||
|
{
|
||||||
|
pmd_t *pmd = pmd_offset(pud, 0);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Clearing the pud entry then flushing the PWC ensures that the pmd
|
||||||
|
* page and any children pte pages will no longer be cached by the MMU,
|
||||||
|
* so can be freed without flushing the PWC again.
|
||||||
|
*/
|
||||||
|
pud_clear(pud);
|
||||||
|
kvmppc_radix_flush_pwc(kvm);
|
||||||
|
|
||||||
|
kvmppc_unmap_free_pmd(kvm, pmd, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* There are a number of bits which may differ between different faults to
|
||||||
|
* the same partition scope entry. RC bits, in the course of cleaning and
|
||||||
|
* aging. And the write bit can change, either the access could have been
|
||||||
|
* upgraded, or a read fault could happen concurrently with a write fault
|
||||||
|
* that sets those bits first.
|
||||||
|
*/
|
||||||
|
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
|
||||||
|
|
||||||
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
|
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
|
||||||
unsigned int level, unsigned long mmu_seq)
|
unsigned int level, unsigned long mmu_seq)
|
||||||
{
|
{
|
||||||
|
@ -235,7 +376,6 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
|
||||||
pud_t *pud, *new_pud = NULL;
|
pud_t *pud, *new_pud = NULL;
|
||||||
pmd_t *pmd, *new_pmd = NULL;
|
pmd_t *pmd, *new_pmd = NULL;
|
||||||
pte_t *ptep, *new_ptep = NULL;
|
pte_t *ptep, *new_ptep = NULL;
|
||||||
unsigned long old;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
/* Traverse the guest's 2nd-level tree, allocate new levels needed */
|
/* Traverse the guest's 2nd-level tree, allocate new levels needed */
|
||||||
|
@ -273,42 +413,39 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
|
||||||
if (pud_huge(*pud)) {
|
if (pud_huge(*pud)) {
|
||||||
unsigned long hgpa = gpa & PUD_MASK;
|
unsigned long hgpa = gpa & PUD_MASK;
|
||||||
|
|
||||||
|
/* Check if we raced and someone else has set the same thing */
|
||||||
|
if (level == 2) {
|
||||||
|
if (pud_raw(*pud) == pte_raw(pte)) {
|
||||||
|
ret = 0;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
/* Valid 1GB page here already, add our extra bits */
|
||||||
|
WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
|
||||||
|
PTE_BITS_MUST_MATCH);
|
||||||
|
kvmppc_radix_update_pte(kvm, (pte_t *)pud,
|
||||||
|
0, pte_val(pte), hgpa, PUD_SHIFT);
|
||||||
|
ret = 0;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
/*
|
/*
|
||||||
* If we raced with another CPU which has just put
|
* If we raced with another CPU which has just put
|
||||||
* a 1GB pte in after we saw a pmd page, try again.
|
* a 1GB pte in after we saw a pmd page, try again.
|
||||||
*/
|
*/
|
||||||
if (level <= 1 && !new_pmd) {
|
if (!new_pmd) {
|
||||||
ret = -EAGAIN;
|
ret = -EAGAIN;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
/* Check if we raced and someone else has set the same thing */
|
|
||||||
if (level == 2 && pud_raw(*pud) == pte_raw(pte)) {
|
|
||||||
ret = 0;
|
|
||||||
goto out_unlock;
|
|
||||||
}
|
|
||||||
/* Valid 1GB page here already, remove it */
|
/* Valid 1GB page here already, remove it */
|
||||||
old = kvmppc_radix_update_pte(kvm, (pte_t *)pud,
|
kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
|
||||||
~0UL, 0, hgpa, PUD_SHIFT);
|
|
||||||
kvmppc_radix_tlbie_page(kvm, hgpa, PUD_SHIFT);
|
|
||||||
if (old & _PAGE_DIRTY) {
|
|
||||||
unsigned long gfn = hgpa >> PAGE_SHIFT;
|
|
||||||
struct kvm_memory_slot *memslot;
|
|
||||||
memslot = gfn_to_memslot(kvm, gfn);
|
|
||||||
if (memslot && memslot->dirty_bitmap)
|
|
||||||
kvmppc_update_dirty_map(memslot,
|
|
||||||
gfn, PUD_SIZE);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (level == 2) {
|
if (level == 2) {
|
||||||
if (!pud_none(*pud)) {
|
if (!pud_none(*pud)) {
|
||||||
/*
|
/*
|
||||||
* There's a page table page here, but we wanted to
|
* There's a page table page here, but we wanted to
|
||||||
* install a large page, so remove and free the page
|
* install a large page, so remove and free the page
|
||||||
* table page. new_pmd will be NULL since level == 2.
|
* table page.
|
||||||
*/
|
*/
|
||||||
new_pmd = pmd_offset(pud, 0);
|
kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
|
||||||
pud_clear(pud);
|
|
||||||
kvmppc_radix_flush_pwc(kvm, gpa);
|
|
||||||
}
|
}
|
||||||
kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
|
kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
@ -324,42 +461,40 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
|
||||||
if (pmd_is_leaf(*pmd)) {
|
if (pmd_is_leaf(*pmd)) {
|
||||||
unsigned long lgpa = gpa & PMD_MASK;
|
unsigned long lgpa = gpa & PMD_MASK;
|
||||||
|
|
||||||
|
/* Check if we raced and someone else has set the same thing */
|
||||||
|
if (level == 1) {
|
||||||
|
if (pmd_raw(*pmd) == pte_raw(pte)) {
|
||||||
|
ret = 0;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
/* Valid 2MB page here already, add our extra bits */
|
||||||
|
WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
|
||||||
|
PTE_BITS_MUST_MATCH);
|
||||||
|
kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
|
||||||
|
0, pte_val(pte), lgpa, PMD_SHIFT);
|
||||||
|
ret = 0;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we raced with another CPU which has just put
|
* If we raced with another CPU which has just put
|
||||||
* a 2MB pte in after we saw a pte page, try again.
|
* a 2MB pte in after we saw a pte page, try again.
|
||||||
*/
|
*/
|
||||||
if (level == 0 && !new_ptep) {
|
if (!new_ptep) {
|
||||||
ret = -EAGAIN;
|
ret = -EAGAIN;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
/* Check if we raced and someone else has set the same thing */
|
|
||||||
if (level == 1 && pmd_raw(*pmd) == pte_raw(pte)) {
|
|
||||||
ret = 0;
|
|
||||||
goto out_unlock;
|
|
||||||
}
|
|
||||||
/* Valid 2MB page here already, remove it */
|
/* Valid 2MB page here already, remove it */
|
||||||
old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
|
kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
|
||||||
~0UL, 0, lgpa, PMD_SHIFT);
|
|
||||||
kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
|
|
||||||
if (old & _PAGE_DIRTY) {
|
|
||||||
unsigned long gfn = lgpa >> PAGE_SHIFT;
|
|
||||||
struct kvm_memory_slot *memslot;
|
|
||||||
memslot = gfn_to_memslot(kvm, gfn);
|
|
||||||
if (memslot && memslot->dirty_bitmap)
|
|
||||||
kvmppc_update_dirty_map(memslot,
|
|
||||||
gfn, PMD_SIZE);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (level == 1) {
|
if (level == 1) {
|
||||||
if (!pmd_none(*pmd)) {
|
if (!pmd_none(*pmd)) {
|
||||||
/*
|
/*
|
||||||
* There's a page table page here, but we wanted to
|
* There's a page table page here, but we wanted to
|
||||||
* install a large page, so remove and free the page
|
* install a large page, so remove and free the page
|
||||||
* table page. new_ptep will be NULL since level == 1.
|
* table page.
|
||||||
*/
|
*/
|
||||||
new_ptep = pte_offset_kernel(pmd, 0);
|
kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
|
||||||
pmd_clear(pmd);
|
|
||||||
kvmppc_radix_flush_pwc(kvm, gpa);
|
|
||||||
}
|
}
|
||||||
kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
|
kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
@ -378,12 +513,12 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
|
||||||
ret = 0;
|
ret = 0;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
/* PTE was previously valid, so invalidate it */
|
/* Valid page here already, add our extra bits */
|
||||||
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
|
WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
|
||||||
0, gpa, 0);
|
PTE_BITS_MUST_MATCH);
|
||||||
kvmppc_radix_tlbie_page(kvm, gpa, 0);
|
kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
|
||||||
if (old & _PAGE_DIRTY)
|
ret = 0;
|
||||||
mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
|
kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
@ -565,9 +700,13 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||||
unsigned long mask = (1ul << shift) - PAGE_SIZE;
|
unsigned long mask = (1ul << shift) - PAGE_SIZE;
|
||||||
pte = __pte(pte_val(pte) | (hva & mask));
|
pte = __pte(pte_val(pte) | (hva & mask));
|
||||||
}
|
}
|
||||||
if (!(writing || upgrade_write))
|
pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
|
||||||
pte = __pte(pte_val(pte) & ~ _PAGE_WRITE);
|
if (writing || upgrade_write) {
|
||||||
pte = __pte(pte_val(pte) | _PAGE_EXEC);
|
if (pte_val(pte) & _PAGE_WRITE)
|
||||||
|
pte = __pte(pte_val(pte) | _PAGE_DIRTY);
|
||||||
|
} else {
|
||||||
|
pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Allocate space in the tree and write the PTE */
|
/* Allocate space in the tree and write the PTE */
|
||||||
|
@ -734,51 +873,6 @@ int kvmppc_init_vm_radix(struct kvm *kvm)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void kvmppc_free_radix(struct kvm *kvm)
|
|
||||||
{
|
|
||||||
unsigned long ig, iu, im;
|
|
||||||
pte_t *pte;
|
|
||||||
pmd_t *pmd;
|
|
||||||
pud_t *pud;
|
|
||||||
pgd_t *pgd;
|
|
||||||
|
|
||||||
if (!kvm->arch.pgtable)
|
|
||||||
return;
|
|
||||||
pgd = kvm->arch.pgtable;
|
|
||||||
for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
|
|
||||||
if (!pgd_present(*pgd))
|
|
||||||
continue;
|
|
||||||
pud = pud_offset(pgd, 0);
|
|
||||||
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
|
|
||||||
if (!pud_present(*pud))
|
|
||||||
continue;
|
|
||||||
if (pud_huge(*pud)) {
|
|
||||||
pud_clear(pud);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
pmd = pmd_offset(pud, 0);
|
|
||||||
for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
|
|
||||||
if (pmd_is_leaf(*pmd)) {
|
|
||||||
pmd_clear(pmd);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (!pmd_present(*pmd))
|
|
||||||
continue;
|
|
||||||
pte = pte_offset_map(pmd, 0);
|
|
||||||
memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
|
|
||||||
kvmppc_pte_free(pte);
|
|
||||||
pmd_clear(pmd);
|
|
||||||
}
|
|
||||||
kvmppc_pmd_free(pmd_offset(pud, 0));
|
|
||||||
pud_clear(pud);
|
|
||||||
}
|
|
||||||
pud_free(kvm->mm, pud_offset(pgd, 0));
|
|
||||||
pgd_clear(pgd);
|
|
||||||
}
|
|
||||||
pgd_free(kvm->mm, kvm->arch.pgtable);
|
|
||||||
kvm->arch.pgtable = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void pte_ctor(void *addr)
|
static void pte_ctor(void *addr)
|
||||||
{
|
{
|
||||||
memset(addr, 0, RADIX_PTE_TABLE_SIZE);
|
memset(addr, 0, RADIX_PTE_TABLE_SIZE);
|
||||||
|
|
|
@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||||
|
|
||||||
if (!tbltmp)
|
if (!tbltmp)
|
||||||
continue;
|
continue;
|
||||||
/*
|
/* Make sure hardware table parameters are compatible */
|
||||||
* Make sure hardware table parameters are exactly the same;
|
if ((tbltmp->it_page_shift <= stt->page_shift) &&
|
||||||
* this is used in the TCE handlers where boundary checks
|
(tbltmp->it_offset << tbltmp->it_page_shift ==
|
||||||
* use only the first attached table.
|
stt->offset << stt->page_shift) &&
|
||||||
*/
|
(tbltmp->it_size << tbltmp->it_page_shift ==
|
||||||
if ((tbltmp->it_page_shift == stt->page_shift) &&
|
stt->size << stt->page_shift)) {
|
||||||
(tbltmp->it_offset == stt->offset) &&
|
|
||||||
(tbltmp->it_size == stt->size)) {
|
|
||||||
/*
|
/*
|
||||||
* Reference the table to avoid races with
|
* Reference the table to avoid races with
|
||||||
* add/remove DMA windows.
|
* add/remove DMA windows.
|
||||||
|
@ -237,7 +235,7 @@ static void release_spapr_tce_table(struct rcu_head *head)
|
||||||
kfree(stt);
|
kfree(stt);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvm_spapr_tce_fault(struct vm_fault *vmf)
|
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
|
||||||
{
|
{
|
||||||
struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
|
struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
|
@ -302,7 +300,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||||
int ret = -ENOMEM;
|
int ret = -ENOMEM;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!args->size)
|
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
|
||||||
|
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
|
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
|
||||||
|
@ -396,7 +395,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
|
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
|
||||||
struct iommu_table *tbl, unsigned long entry)
|
struct iommu_table *tbl, unsigned long entry)
|
||||||
{
|
{
|
||||||
enum dma_data_direction dir = DMA_NONE;
|
enum dma_data_direction dir = DMA_NONE;
|
||||||
|
@ -416,7 +415,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
|
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
|
||||||
|
struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
|
||||||
|
unsigned long entry)
|
||||||
|
{
|
||||||
|
unsigned long i, ret = H_SUCCESS;
|
||||||
|
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
|
||||||
|
unsigned long io_entry = entry * subpages;
|
||||||
|
|
||||||
|
for (i = 0; i < subpages; ++i) {
|
||||||
|
ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
|
||||||
|
if (ret != H_SUCCESS)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||||
unsigned long entry, unsigned long ua,
|
unsigned long entry, unsigned long ua,
|
||||||
enum dma_data_direction dir)
|
enum dma_data_direction dir)
|
||||||
{
|
{
|
||||||
|
@ -453,6 +469,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static long kvmppc_tce_iommu_map(struct kvm *kvm,
|
||||||
|
struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
|
||||||
|
unsigned long entry, unsigned long ua,
|
||||||
|
enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
unsigned long i, pgoff, ret = H_SUCCESS;
|
||||||
|
unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
|
||||||
|
unsigned long io_entry = entry * subpages;
|
||||||
|
|
||||||
|
for (i = 0, pgoff = 0; i < subpages;
|
||||||
|
++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
|
||||||
|
|
||||||
|
ret = kvmppc_tce_iommu_do_map(kvm, tbl,
|
||||||
|
io_entry + i, ua + pgoff, dir);
|
||||||
|
if (ret != H_SUCCESS)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||||
unsigned long ioba, unsigned long tce)
|
unsigned long ioba, unsigned long tce)
|
||||||
{
|
{
|
||||||
|
@ -491,10 +528,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||||
|
|
||||||
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||||
if (dir == DMA_NONE)
|
if (dir == DMA_NONE)
|
||||||
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
|
ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
|
||||||
stit->tbl, entry);
|
stit->tbl, entry);
|
||||||
else
|
else
|
||||||
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
|
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
|
||||||
entry, ua, dir);
|
entry, ua, dir);
|
||||||
|
|
||||||
if (ret == H_SUCCESS)
|
if (ret == H_SUCCESS)
|
||||||
|
@ -570,7 +607,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-			ret = kvmppc_tce_iommu_map(vcpu->kvm,
+			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry + i, ua,
 					iommu_tce_direction(tce));
 
@ -615,10 +652,10 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 		return H_PARAMETER;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-		unsigned long entry = ioba >> stit->tbl->it_page_shift;
+		unsigned long entry = ioba >> stt->page_shift;
 
 		for (i = 0; i < npages; ++i) {
-			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
 					stit->tbl, entry + i);
 
 			if (ret == H_SUCCESS)
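Not part of the patch: the hunks above back one guest TCE with several host IOMMU TCEs when the guest IOMMU page is larger than the host one. The small C sketch below restates that subpage arithmetic; the function name, example shifts and the main() driver are hypothetical, not taken from the kernel sources.

#include <stdio.h>

/* Mirrors: subpages = 1 << (stt->page_shift - tbl->it_page_shift);
 *          io_entry = entry * subpages;                              */
static unsigned long first_io_entry(unsigned long guest_entry,
				    unsigned int guest_page_shift,
				    unsigned int host_iommu_page_shift,
				    unsigned long *subpages)
{
	*subpages = 1UL << (guest_page_shift - host_iommu_page_shift);
	return guest_entry * *subpages;
}

int main(void)
{
	unsigned long subpages;
	/* 64K guest IOMMU page backed by 4K host IOMMU pages */
	unsigned long io = first_io_entry(3, 16, 12, &subpages);

	printf("guest TCE 3 -> host TCEs %lu..%lu (%lu subpages)\n",
	       io, io + subpages - 1, subpages);
	return 0;
}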
@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
 	return H_SUCCESS;
 }
 
-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 		struct iommu_table *tbl, unsigned long entry)
 {
 	enum dma_data_direction dir = DMA_NONE;
@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
 	return ret;
 }
 
-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+		unsigned long entry)
+{
+	unsigned long i, ret = H_SUCCESS;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry * subpages;
+
+	for (i = 0; i < subpages; ++i) {
+		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
+		if (ret != H_SUCCESS)
+			break;
+	}
+
+	return ret;
+}
+
+static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		unsigned long entry, unsigned long ua,
 		enum dma_data_direction dir)
 {
@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
 	return 0;
 }
 
+static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+		unsigned long entry, unsigned long ua,
+		enum dma_data_direction dir)
+{
+	unsigned long i, pgoff, ret = H_SUCCESS;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry * subpages;
+
+	for (i = 0, pgoff = 0; i < subpages;
+			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
+
+		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
+				io_entry + i, ua + pgoff, dir);
+		if (ret != H_SUCCESS)
+			break;
+	}
+
+	return ret;
+}
+
 long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		unsigned long ioba, unsigned long tce)
 {
@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 		if (dir == DMA_NONE)
-			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
 					stit->tbl, entry);
 		else
-			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry, ua, dir);
 
 		if (ret == H_SUCCESS)
@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry + i, ua,
 					iommu_tce_direction(tce));
 
@ -526,10 +564,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 		return H_PARAMETER;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-		unsigned long entry = ioba >> stit->tbl->it_page_shift;
+		unsigned long entry = ioba >> stt->page_shift;
 
 		for (i = 0; i < npages; ++i) {
-			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
 					stit->tbl, entry + i);
 
 			if (ret == H_SUCCESS)
@ -571,7 +609,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	page = stt->pages[idx / TCES_PER_PAGE];
 	tbl = (u64 *)page_address(page);
 
-	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
+	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
 
 	return H_SUCCESS;
 }
 
@ -23,7 +23,9 @@
 #include <asm/reg.h>
 #include <asm/switch_to.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include "book3s.h"
+#include <asm/asm-prototypes.h>
 
 #define OP_19_XOP_RFID		18
 #define OP_19_XOP_RFI		50
@ -47,6 +49,12 @@
 #define OP_31_XOP_EIOIO		854
 #define OP_31_XOP_SLBMFEE	915
 
+#define OP_31_XOP_TBEGIN	654
+#define OP_31_XOP_TABORT	910
+
+#define OP_31_XOP_TRECLAIM	942
+#define OP_31_XOP_TRCHKPT	1006
+
 /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
 #define OP_31_XOP_DCBZ		1010
 
@ -87,6 +95,157 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
|
||||||
|
sizeof(vcpu->arch.gpr_tm));
|
||||||
|
memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
|
||||||
|
sizeof(struct thread_fp_state));
|
||||||
|
memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
|
||||||
|
sizeof(struct thread_vr_state));
|
||||||
|
vcpu->arch.ppr_tm = vcpu->arch.ppr;
|
||||||
|
vcpu->arch.dscr_tm = vcpu->arch.dscr;
|
||||||
|
vcpu->arch.amr_tm = vcpu->arch.amr;
|
||||||
|
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
|
||||||
|
vcpu->arch.tar_tm = vcpu->arch.tar;
|
||||||
|
vcpu->arch.lr_tm = vcpu->arch.regs.link;
|
||||||
|
vcpu->arch.cr_tm = vcpu->arch.cr;
|
||||||
|
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
|
||||||
|
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
|
||||||
|
sizeof(vcpu->arch.regs.gpr));
|
||||||
|
memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
|
||||||
|
sizeof(struct thread_fp_state));
|
||||||
|
memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
|
||||||
|
sizeof(struct thread_vr_state));
|
||||||
|
vcpu->arch.ppr = vcpu->arch.ppr_tm;
|
||||||
|
vcpu->arch.dscr = vcpu->arch.dscr_tm;
|
||||||
|
vcpu->arch.amr = vcpu->arch.amr_tm;
|
||||||
|
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
|
||||||
|
vcpu->arch.tar = vcpu->arch.tar_tm;
|
||||||
|
vcpu->arch.regs.link = vcpu->arch.lr_tm;
|
||||||
|
vcpu->arch.cr = vcpu->arch.cr_tm;
|
||||||
|
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
|
||||||
|
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
|
||||||
|
{
|
||||||
|
unsigned long guest_msr = kvmppc_get_msr(vcpu);
|
||||||
|
int fc_val = ra_val ? ra_val : 1;
|
||||||
|
uint64_t texasr;
|
||||||
|
|
||||||
|
/* CR0 = 0 | MSR[TS] | 0 */
|
||||||
|
vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
|
||||||
|
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
|
||||||
|
<< CR0_SHIFT);
|
||||||
|
|
||||||
|
preempt_disable();
|
||||||
|
tm_enable();
|
||||||
|
texasr = mfspr(SPRN_TEXASR);
|
||||||
|
kvmppc_save_tm_pr(vcpu);
|
||||||
|
kvmppc_copyfrom_vcpu_tm(vcpu);
|
||||||
|
|
||||||
|
/* failure recording depends on Failure Summary bit */
|
||||||
|
if (!(texasr & TEXASR_FS)) {
|
||||||
|
texasr &= ~TEXASR_FC;
|
||||||
|
texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;
|
||||||
|
|
||||||
|
texasr &= ~(TEXASR_PR | TEXASR_HV);
|
||||||
|
if (kvmppc_get_msr(vcpu) & MSR_PR)
|
||||||
|
texasr |= TEXASR_PR;
|
||||||
|
|
||||||
|
if (kvmppc_get_msr(vcpu) & MSR_HV)
|
||||||
|
texasr |= TEXASR_HV;
|
||||||
|
|
||||||
|
vcpu->arch.texasr = texasr;
|
||||||
|
vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
|
||||||
|
mtspr(SPRN_TEXASR, texasr);
|
||||||
|
mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
|
||||||
|
}
|
||||||
|
tm_disable();
|
||||||
|
/*
|
||||||
|
* treclaim need quit to non-transactional state.
|
||||||
|
*/
|
||||||
|
guest_msr &= ~(MSR_TS_MASK);
|
||||||
|
kvmppc_set_msr(vcpu, guest_msr);
|
||||||
|
preempt_enable();
|
||||||
|
|
||||||
|
if (vcpu->arch.shadow_fscr & FSCR_TAR)
|
||||||
|
mtspr(SPRN_TAR, vcpu->arch.tar);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
unsigned long guest_msr = kvmppc_get_msr(vcpu);
|
||||||
|
|
||||||
|
preempt_disable();
|
||||||
|
/*
|
||||||
|
* need flush FP/VEC/VSX to vcpu save area before
|
||||||
|
* copy.
|
||||||
|
*/
|
||||||
|
kvmppc_giveup_ext(vcpu, MSR_VSX);
|
||||||
|
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
|
||||||
|
kvmppc_copyto_vcpu_tm(vcpu);
|
||||||
|
kvmppc_save_tm_sprs(vcpu);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* as a result of trecheckpoint. set TS to suspended.
|
||||||
|
*/
|
||||||
|
guest_msr &= ~(MSR_TS_MASK);
|
||||||
|
guest_msr |= MSR_TS_S;
|
||||||
|
kvmppc_set_msr(vcpu, guest_msr);
|
||||||
|
kvmppc_restore_tm_pr(vcpu);
|
||||||
|
preempt_enable();
|
||||||
|
}
|
||||||
|
|
||||||
|
/* emulate tabort. at guest privilege state */
|
||||||
|
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
|
||||||
|
{
|
||||||
|
/* currently we only emulate tabort. but no emulation of other
|
||||||
|
* tabort variants since there is no kernel usage of them at
|
||||||
|
* present.
|
||||||
|
*/
|
||||||
|
unsigned long guest_msr = kvmppc_get_msr(vcpu);
|
||||||
|
uint64_t org_texasr;
|
||||||
|
|
||||||
|
preempt_disable();
|
||||||
|
tm_enable();
|
||||||
|
org_texasr = mfspr(SPRN_TEXASR);
|
||||||
|
tm_abort(ra_val);
|
||||||
|
|
||||||
|
/* CR0 = 0 | MSR[TS] | 0 */
|
||||||
|
vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
|
||||||
|
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
|
||||||
|
<< CR0_SHIFT);
|
||||||
|
|
||||||
|
vcpu->arch.texasr = mfspr(SPRN_TEXASR);
|
||||||
|
/* failure recording depends on Failure Summary bit,
|
||||||
|
* and tabort will be treated as nops in non-transactional
|
||||||
|
* state.
|
||||||
|
*/
|
||||||
|
if (!(org_texasr & TEXASR_FS) &&
|
||||||
|
MSR_TM_ACTIVE(guest_msr)) {
|
||||||
|
vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
|
||||||
|
if (guest_msr & MSR_PR)
|
||||||
|
vcpu->arch.texasr |= TEXASR_PR;
|
||||||
|
|
||||||
|
if (guest_msr & MSR_HV)
|
||||||
|
vcpu->arch.texasr |= TEXASR_HV;
|
||||||
|
|
||||||
|
vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
|
||||||
|
}
|
||||||
|
tm_disable();
|
||||||
|
preempt_enable();
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||||
unsigned int inst, int *advance)
|
unsigned int inst, int *advance)
|
||||||
{
|
{
|
||||||
|
@ -117,11 +276,28 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||||
case 19:
|
case 19:
|
||||||
switch (get_xop(inst)) {
|
switch (get_xop(inst)) {
|
||||||
case OP_19_XOP_RFID:
|
case OP_19_XOP_RFID:
|
||||||
case OP_19_XOP_RFI:
|
case OP_19_XOP_RFI: {
|
||||||
|
unsigned long srr1 = kvmppc_get_srr1(vcpu);
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
unsigned long cur_msr = kvmppc_get_msr(vcpu);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* add rules to fit in ISA specification regarding TM
|
||||||
|
* state transistion in TM disable/Suspended state,
|
||||||
|
* and target TM state is TM inactive(00) state. (the
|
||||||
|
* change should be suppressed).
|
||||||
|
*/
|
||||||
|
if (((cur_msr & MSR_TM) == 0) &&
|
||||||
|
((srr1 & MSR_TM) == 0) &&
|
||||||
|
MSR_TM_SUSPENDED(cur_msr) &&
|
||||||
|
!MSR_TM_ACTIVE(srr1))
|
||||||
|
srr1 |= MSR_TS_S;
|
||||||
|
#endif
|
||||||
kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
|
kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
|
||||||
kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
|
kvmppc_set_msr(vcpu, srr1);
|
||||||
*advance = 0;
|
*advance = 0;
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
emulated = EMULATE_FAIL;
|
emulated = EMULATE_FAIL;
|
||||||
|
@ -304,6 +480,140 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||||
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
case OP_31_XOP_TBEGIN:
|
||||||
|
{
|
||||||
|
if (!cpu_has_feature(CPU_FTR_TM))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
|
||||||
|
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
|
||||||
|
preempt_disable();
|
||||||
|
vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
|
||||||
|
(vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
|
||||||
|
|
||||||
|
vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
|
||||||
|
(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
|
||||||
|
<< TEXASR_FC_LG));
|
||||||
|
|
||||||
|
if ((inst >> 21) & 0x1)
|
||||||
|
vcpu->arch.texasr |= TEXASR_ROT;
|
||||||
|
|
||||||
|
if (kvmppc_get_msr(vcpu) & MSR_HV)
|
||||||
|
vcpu->arch.texasr |= TEXASR_HV;
|
||||||
|
|
||||||
|
vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
|
||||||
|
vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
|
||||||
|
|
||||||
|
kvmppc_restore_tm_sprs(vcpu);
|
||||||
|
preempt_enable();
|
||||||
|
} else
|
||||||
|
emulated = EMULATE_FAIL;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case OP_31_XOP_TABORT:
|
||||||
|
{
|
||||||
|
ulong guest_msr = kvmppc_get_msr(vcpu);
|
||||||
|
unsigned long ra_val = 0;
|
||||||
|
|
||||||
|
if (!cpu_has_feature(CPU_FTR_TM))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
|
||||||
|
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* only emulate for privilege guest, since problem state
|
||||||
|
* guest can run with TM enabled and we don't expect to
|
||||||
|
* trap at here for that case.
|
||||||
|
*/
|
||||||
|
WARN_ON(guest_msr & MSR_PR);
|
||||||
|
|
||||||
|
if (ra)
|
||||||
|
ra_val = kvmppc_get_gpr(vcpu, ra);
|
||||||
|
|
||||||
|
kvmppc_emulate_tabort(vcpu, ra_val);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case OP_31_XOP_TRECLAIM:
|
||||||
|
{
|
||||||
|
ulong guest_msr = kvmppc_get_msr(vcpu);
|
||||||
|
unsigned long ra_val = 0;
|
||||||
|
|
||||||
|
if (!cpu_has_feature(CPU_FTR_TM))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
|
||||||
|
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate interrupts based on priorities */
|
||||||
|
if (guest_msr & MSR_PR) {
|
||||||
|
/* Privileged Instruction type Program Interrupt */
|
||||||
|
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!MSR_TM_ACTIVE(guest_msr)) {
|
||||||
|
/* TM bad thing interrupt */
|
||||||
|
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ra)
|
||||||
|
ra_val = kvmppc_get_gpr(vcpu, ra);
|
||||||
|
kvmppc_emulate_treclaim(vcpu, ra_val);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case OP_31_XOP_TRCHKPT:
|
||||||
|
{
|
||||||
|
ulong guest_msr = kvmppc_get_msr(vcpu);
|
||||||
|
unsigned long texasr;
|
||||||
|
|
||||||
|
if (!cpu_has_feature(CPU_FTR_TM))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
|
||||||
|
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate interrupt based on priorities */
|
||||||
|
if (guest_msr & MSR_PR) {
|
||||||
|
/* Privileged Instruction type Program Intr */
|
||||||
|
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
tm_enable();
|
||||||
|
texasr = mfspr(SPRN_TEXASR);
|
||||||
|
tm_disable();
|
||||||
|
|
||||||
|
if (MSR_TM_ACTIVE(guest_msr) ||
|
||||||
|
!(texasr & (TEXASR_FS))) {
|
||||||
|
/* TM bad thing interrupt */
|
||||||
|
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
kvmppc_emulate_trchkpt(vcpu);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
default:
|
default:
|
||||||
emulated = EMULATE_FAIL;
|
emulated = EMULATE_FAIL;
|
||||||
}
|
}
|
||||||
|
@ -465,13 +775,38 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
|
||||||
break;
|
break;
|
||||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
case SPRN_TFHAR:
|
case SPRN_TFHAR:
|
||||||
vcpu->arch.tfhar = spr_val;
|
|
||||||
break;
|
|
||||||
case SPRN_TEXASR:
|
case SPRN_TEXASR:
|
||||||
vcpu->arch.texasr = spr_val;
|
|
||||||
break;
|
|
||||||
case SPRN_TFIAR:
|
case SPRN_TFIAR:
|
||||||
vcpu->arch.tfiar = spr_val;
|
if (!cpu_has_feature(CPU_FTR_TM))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
|
||||||
|
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
|
||||||
|
!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
|
||||||
|
(sprn == SPRN_TFHAR))) {
|
||||||
|
/* it is illegal to mtspr() TM regs in
|
||||||
|
* other than non-transactional state, with
|
||||||
|
* the exception of TFHAR in suspend state.
|
||||||
|
*/
|
||||||
|
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
tm_enable();
|
||||||
|
if (sprn == SPRN_TFHAR)
|
||||||
|
mtspr(SPRN_TFHAR, spr_val);
|
||||||
|
else if (sprn == SPRN_TEXASR)
|
||||||
|
mtspr(SPRN_TEXASR, spr_val);
|
||||||
|
else
|
||||||
|
mtspr(SPRN_TFIAR, spr_val);
|
||||||
|
tm_disable();
|
||||||
|
|
||||||
break;
|
break;
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
@ -618,13 +953,25 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
|
||||||
break;
|
break;
|
||||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
case SPRN_TFHAR:
|
case SPRN_TFHAR:
|
||||||
*spr_val = vcpu->arch.tfhar;
|
|
||||||
break;
|
|
||||||
case SPRN_TEXASR:
|
case SPRN_TEXASR:
|
||||||
*spr_val = vcpu->arch.texasr;
|
|
||||||
break;
|
|
||||||
case SPRN_TFIAR:
|
case SPRN_TFIAR:
|
||||||
*spr_val = vcpu->arch.tfiar;
|
if (!cpu_has_feature(CPU_FTR_TM))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
|
||||||
|
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
|
||||||
|
emulated = EMULATE_AGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
tm_enable();
|
||||||
|
if (sprn == SPRN_TFHAR)
|
||||||
|
*spr_val = mfspr(SPRN_TFHAR);
|
||||||
|
else if (sprn == SPRN_TEXASR)
|
||||||
|
*spr_val = mfspr(SPRN_TEXASR);
|
||||||
|
else if (sprn == SPRN_TFIAR)
|
||||||
|
*spr_val = mfspr(SPRN_TFIAR);
|
||||||
|
tm_disable();
|
||||||
break;
|
break;
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -123,6 +123,32 @@ static bool no_mixing_hpt_and_radix;
|
||||||
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
|
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
|
||||||
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
|
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* RWMR values for POWER8. These control the rate at which PURR
|
||||||
|
* and SPURR count and should be set according to the number of
|
||||||
|
* online threads in the vcore being run.
|
||||||
|
*/
|
||||||
|
#define RWMR_RPA_P8_1THREAD 0x164520C62609AECA
|
||||||
|
#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9
|
||||||
|
#define RWMR_RPA_P8_3THREAD 0x164520C62609AECA
|
||||||
|
#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9
|
||||||
|
#define RWMR_RPA_P8_5THREAD 0x164520C62609AECA
|
||||||
|
#define RWMR_RPA_P8_6THREAD 0x164520C62609AECA
|
||||||
|
#define RWMR_RPA_P8_7THREAD 0x164520C62609AECA
|
||||||
|
#define RWMR_RPA_P8_8THREAD 0x164520C62609AECA
|
||||||
|
|
||||||
|
static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
|
||||||
|
RWMR_RPA_P8_1THREAD,
|
||||||
|
RWMR_RPA_P8_1THREAD,
|
||||||
|
RWMR_RPA_P8_2THREAD,
|
||||||
|
RWMR_RPA_P8_3THREAD,
|
||||||
|
RWMR_RPA_P8_4THREAD,
|
||||||
|
RWMR_RPA_P8_5THREAD,
|
||||||
|
RWMR_RPA_P8_6THREAD,
|
||||||
|
RWMR_RPA_P8_7THREAD,
|
||||||
|
RWMR_RPA_P8_8THREAD,
|
||||||
|
};
|
||||||
|
|
||||||
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
|
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
|
||||||
int *ip)
|
int *ip)
|
||||||
{
|
{
|
||||||
|
@ -371,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
 	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
-	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
 	for (r = 0; r < 16; ++r)
 		pr_err("r%2d = %.16lx r%d = %.16lx\n",
 		       r, kvmppc_get_gpr(vcpu, r),
 		       r+16, kvmppc_get_gpr(vcpu, r+16));
 	pr_err("ctr = %.16lx lr = %.16lx\n",
-	       vcpu->arch.ctr, vcpu->arch.lr);
+	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
 	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
 	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
 	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
@ -385,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
 	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
 	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
-	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+	       vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
 	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
 	pr_err("fault dar = %.16lx dsisr = %.8x\n",
 	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@ -1526,6 +1552,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||||
*val = get_reg_val(id, vcpu->arch.dec_expires +
|
*val = get_reg_val(id, vcpu->arch.dec_expires +
|
||||||
vcpu->arch.vcore->tb_offset);
|
vcpu->arch.vcore->tb_offset);
|
||||||
break;
|
break;
|
||||||
|
case KVM_REG_PPC_ONLINE:
|
||||||
|
*val = get_reg_val(id, vcpu->arch.online);
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
r = -EINVAL;
|
r = -EINVAL;
|
||||||
break;
|
break;
|
||||||
|
@ -1757,6 +1786,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||||
vcpu->arch.dec_expires = set_reg_val(id, *val) -
|
vcpu->arch.dec_expires = set_reg_val(id, *val) -
|
||||||
vcpu->arch.vcore->tb_offset;
|
vcpu->arch.vcore->tb_offset;
|
||||||
break;
|
break;
|
||||||
|
case KVM_REG_PPC_ONLINE:
|
||||||
|
i = set_reg_val(id, *val);
|
||||||
|
if (i && !vcpu->arch.online)
|
||||||
|
atomic_inc(&vcpu->arch.vcore->online_count);
|
||||||
|
else if (!i && vcpu->arch.online)
|
||||||
|
atomic_dec(&vcpu->arch.vcore->online_count);
|
||||||
|
vcpu->arch.online = i;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
r = -EINVAL;
|
r = -EINVAL;
|
||||||
break;
|
break;
|
||||||
|
@ -2850,6 +2887,25 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On POWER8, set RWMR register.
|
||||||
|
* Since it only affects PURR and SPURR, it doesn't affect
|
||||||
|
* the host, so we don't save/restore the host value.
|
||||||
|
*/
|
||||||
|
if (is_power8) {
|
||||||
|
unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
|
||||||
|
int n_online = atomic_read(&vc->online_count);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Use the 8-thread value if we're doing split-core
|
||||||
|
* or if the vcore's online count looks bogus.
|
||||||
|
*/
|
||||||
|
if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
|
||||||
|
n_online >= 1 && n_online <= MAX_SMT_THREADS)
|
||||||
|
rwmr_val = p8_rwmr_values[n_online];
|
||||||
|
mtspr(SPRN_RWMR, rwmr_val);
|
||||||
|
}
|
||||||
|
|
||||||
/* Start all the threads */
|
/* Start all the threads */
|
||||||
active = 0;
|
active = 0;
|
||||||
for (sub = 0; sub < core_info.n_subcores; ++sub) {
|
for (sub = 0; sub < core_info.n_subcores; ++sub) {
|
||||||
|
@ -2902,6 +2958,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||||
for (sub = 0; sub < core_info.n_subcores; ++sub)
|
for (sub = 0; sub < core_info.n_subcores; ++sub)
|
||||||
spin_unlock(&core_info.vc[sub]->lock);
|
spin_unlock(&core_info.vc[sub]->lock);
|
||||||
|
|
||||||
|
if (kvm_is_radix(vc->kvm)) {
|
||||||
|
int tmp = pcpu;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Do we need to flush the process scoped TLB for the LPAR?
|
||||||
|
*
|
||||||
|
* On POWER9, individual threads can come in here, but the
|
||||||
|
* TLB is shared between the 4 threads in a core, hence
|
||||||
|
* invalidating on one thread invalidates for all.
|
||||||
|
* Thus we make all 4 threads use the same bit here.
|
||||||
|
*
|
||||||
|
* Hash must be flushed in realmode in order to use tlbiel.
|
||||||
|
*/
|
||||||
|
mtspr(SPRN_LPID, vc->kvm->arch.lpid);
|
||||||
|
isync();
|
||||||
|
|
||||||
|
if (cpu_has_feature(CPU_FTR_ARCH_300))
|
||||||
|
tmp &= ~0x3UL;
|
||||||
|
|
||||||
|
if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
|
||||||
|
radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
|
||||||
|
/* Clear the bit after the TLB flush */
|
||||||
|
cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Interrupts will be enabled once we get into the guest,
|
* Interrupts will be enabled once we get into the guest,
|
||||||
* so tell lockdep that we're about to enable interrupts.
|
* so tell lockdep that we're about to enable interrupts.
|
||||||
|
@ -3356,6 +3438,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Force online to 1 for the sake of old userspace which doesn't
|
||||||
|
* set it.
|
||||||
|
*/
|
||||||
|
if (!vcpu->arch.online) {
|
||||||
|
atomic_inc(&vcpu->arch.vcore->online_count);
|
||||||
|
vcpu->arch.online = 1;
|
||||||
|
}
|
||||||
|
|
||||||
kvmppc_core_prepare_to_enter(vcpu);
|
kvmppc_core_prepare_to_enter(vcpu);
|
||||||
|
|
||||||
/* No need to go into the guest when all we'll do is come back out */
|
/* No need to go into the guest when all we'll do is come back out */
|
||||||
|
|
|
@ -18,6 +18,7 @@
 #include <linux/cma.h>
 #include <linux/bitops.h>
 
+#include <asm/asm-prototypes.h>
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@ -211,9 +212,9 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
 
 	/* Only need to do the expensive mfmsr() on radix */
 	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
-		r = powernv_get_random_long(&vcpu->arch.gpr[4]);
+		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
 	else
-		r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
+		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
 	if (r)
 		return H_SUCCESS;
 
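Not part of the patch: several hunks in this pull (here and in the real-mode MMU and XICS code) replace vcpu->arch.gpr[N] with vcpu->arch.regs.gpr[N], i.e. the GPRs now live in an embedded pt_regs-style block. The sketch below only illustrates that layout change; the struct and field names are simplified stand-ins, not the real kvm_vcpu_arch definitions.

#include <stdio.h>

struct demo_regs {
	unsigned long gpr[32];
	unsigned long nip, ctr, link, xer;
};

struct demo_vcpu_arch {
	struct demo_regs regs;		/* was: unsigned long gpr[32]; ... */
};

int main(void)
{
	struct demo_vcpu_arch arch = { 0 };

	arch.regs.gpr[4] = 0x1234;	/* hcall results are returned in r4 */
	printf("r4 = %#lx\n", arch.regs.gpr[4]);
	return 0;
}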
@ -562,7 +563,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 {
 	if (!kvmppc_xics_enabled(vcpu))
 		return H_TOO_HARD;
-	vcpu->arch.gpr[5] = get_tb();
+	vcpu->arch.regs.gpr[5] = get_tb();
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_xirr(vcpu);
@ -633,7 +634,19 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 
 void kvmppc_bad_interrupt(struct pt_regs *regs)
 {
+	/*
+	 * 100 could happen at any time, 200 can happen due to invalid real
+	 * address access for example (or any time due to a hardware problem).
+	 */
+	if (TRAP(regs) == 0x100) {
+		get_paca()->in_nmi++;
+		system_reset_exception(regs);
+		get_paca()->in_nmi--;
+	} else if (TRAP(regs) == 0x200) {
+		machine_check_exception(regs);
+	} else {
 		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
+	}
 	panic("Bad KVM trap");
 }
 
@ -137,7 +137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	/*
 	 * We return here in virtual mode after the guest exits
 	 * with something that we can't handle in real mode.
-	 * Interrupts are enabled again at this point.
+	 * Interrupts are still hard-disabled.
 	 */
 
 	/*
@ -418,7 +418,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 		    long pte_index, unsigned long pteh, unsigned long ptel)
 {
 	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
-				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
+				 vcpu->arch.pgdir, true,
+				 &vcpu->arch.regs.gpr[4]);
 }
 
 #ifdef __BIG_ENDIAN__
@ -434,24 +435,6 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r)
|
||||||
(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
|
(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int try_lock_tlbie(unsigned int *lock)
|
|
||||||
{
|
|
||||||
unsigned int tmp, old;
|
|
||||||
unsigned int token = LOCK_TOKEN;
|
|
||||||
|
|
||||||
asm volatile("1:lwarx %1,0,%2\n"
|
|
||||||
" cmpwi cr0,%1,0\n"
|
|
||||||
" bne 2f\n"
|
|
||||||
" stwcx. %3,0,%2\n"
|
|
||||||
" bne- 1b\n"
|
|
||||||
" isync\n"
|
|
||||||
"2:"
|
|
||||||
: "=&r" (tmp), "=&r" (old)
|
|
||||||
: "r" (lock), "r" (token)
|
|
||||||
: "cc", "memory");
|
|
||||||
return old == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
|
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
|
||||||
long npages, int global, bool need_sync)
|
long npages, int global, bool need_sync)
|
||||||
{
|
{
|
||||||
|
@ -463,8 +446,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
|
||||||
* the RS field, this is backwards-compatible with P7 and P8.
|
* the RS field, this is backwards-compatible with P7 and P8.
|
||||||
*/
|
*/
|
||||||
if (global) {
|
if (global) {
|
||||||
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
|
|
||||||
cpu_relax();
|
|
||||||
if (need_sync)
|
if (need_sync)
|
||||||
asm volatile("ptesync" : : : "memory");
|
asm volatile("ptesync" : : : "memory");
|
||||||
for (i = 0; i < npages; ++i) {
|
for (i = 0; i < npages; ++i) {
|
||||||
|
@ -483,7 +464,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
|
||||||
}
|
}
|
||||||
|
|
||||||
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
|
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
|
||||||
kvm->arch.tlbie_lock = 0;
|
|
||||||
} else {
|
} else {
|
||||||
if (need_sync)
|
if (need_sync)
|
||||||
asm volatile("ptesync" : : : "memory");
|
asm volatile("ptesync" : : : "memory");
|
||||||
|
@ -561,13 +541,13 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
|
||||||
unsigned long pte_index, unsigned long avpn)
|
unsigned long pte_index, unsigned long avpn)
|
||||||
{
|
{
|
||||||
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
|
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
|
||||||
&vcpu->arch.gpr[4]);
|
&vcpu->arch.regs.gpr[4]);
|
||||||
}
|
}
|
||||||
|
|
||||||
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
|
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
struct kvm *kvm = vcpu->kvm;
|
struct kvm *kvm = vcpu->kvm;
|
||||||
unsigned long *args = &vcpu->arch.gpr[4];
|
unsigned long *args = &vcpu->arch.regs.gpr[4];
|
||||||
__be64 *hp, *hptes[4];
|
__be64 *hp, *hptes[4];
|
||||||
unsigned long tlbrb[4];
|
unsigned long tlbrb[4];
|
||||||
long int i, j, k, n, found, indexes[4];
|
long int i, j, k, n, found, indexes[4];
|
||||||
|
@ -787,8 +767,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
|
||||||
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
|
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
|
||||||
r &= ~HPTE_GR_RESERVED;
|
r &= ~HPTE_GR_RESERVED;
|
||||||
}
|
}
|
||||||
vcpu->arch.gpr[4 + i * 2] = v;
|
vcpu->arch.regs.gpr[4 + i * 2] = v;
|
||||||
vcpu->arch.gpr[5 + i * 2] = r;
|
vcpu->arch.regs.gpr[5 + i * 2] = r;
|
||||||
}
|
}
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
}
|
}
|
||||||
|
@ -834,7 +814,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
vcpu->arch.gpr[4] = gr;
|
vcpu->arch.regs.gpr[4] = gr;
|
||||||
ret = H_SUCCESS;
|
ret = H_SUCCESS;
|
||||||
out:
|
out:
|
||||||
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
|
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
|
||||||
|
@ -881,7 +861,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
|
||||||
kvmppc_set_dirty_from_hpte(kvm, v, gr);
|
kvmppc_set_dirty_from_hpte(kvm, v, gr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
vcpu->arch.gpr[4] = gr;
|
vcpu->arch.regs.gpr[4] = gr;
|
||||||
ret = H_SUCCESS;
|
ret = H_SUCCESS;
|
||||||
out:
|
out:
|
||||||
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
|
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
|
||||||
|
|
|
@ -517,7 +517,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
 	} while (!icp_rm_try_update(icp, old_state, new_state));
 
 	/* Return the result in GPR4 */
-	vcpu->arch.gpr[4] = xirr;
+	vcpu->arch.regs.gpr[4] = xirr;
 
 	return check_too_hard(xics, icp);
 }
 
@ -39,8 +39,6 @@ BEGIN_FTR_SECTION; \
|
||||||
extsw reg, reg; \
|
extsw reg, reg; \
|
||||||
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
|
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
|
||||||
|
|
||||||
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
|
|
||||||
|
|
||||||
/* Values in HSTATE_NAPPING(r13) */
|
/* Values in HSTATE_NAPPING(r13) */
|
||||||
#define NAPPING_CEDE 1
|
#define NAPPING_CEDE 1
|
||||||
#define NAPPING_NOVCPU 2
|
#define NAPPING_NOVCPU 2
|
||||||
|
@ -639,6 +637,10 @@ kvmppc_hv_entry:
|
||||||
/* Primary thread switches to guest partition. */
|
/* Primary thread switches to guest partition. */
|
||||||
cmpwi r6,0
|
cmpwi r6,0
|
||||||
bne 10f
|
bne 10f
|
||||||
|
|
||||||
|
/* Radix has already switched LPID and flushed core TLB */
|
||||||
|
bne cr7, 22f
|
||||||
|
|
||||||
lwz r7,KVM_LPID(r9)
|
lwz r7,KVM_LPID(r9)
|
||||||
BEGIN_FTR_SECTION
|
BEGIN_FTR_SECTION
|
||||||
ld r6,KVM_SDR1(r9)
|
ld r6,KVM_SDR1(r9)
|
||||||
|
@ -650,7 +652,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
|
||||||
mtspr SPRN_LPID,r7
|
mtspr SPRN_LPID,r7
|
||||||
isync
|
isync
|
||||||
|
|
||||||
/* See if we need to flush the TLB */
|
/* See if we need to flush the TLB. Hash has to be done in RM */
|
||||||
lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
|
lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
|
||||||
BEGIN_FTR_SECTION
|
BEGIN_FTR_SECTION
|
||||||
/*
|
/*
|
||||||
|
@ -677,15 +679,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
|
||||||
li r7,0x800 /* IS field = 0b10 */
|
li r7,0x800 /* IS field = 0b10 */
|
||||||
ptesync
|
ptesync
|
||||||
li r0,0 /* RS for P9 version of tlbiel */
|
li r0,0 /* RS for P9 version of tlbiel */
|
||||||
bne cr7, 29f
|
|
||||||
28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
|
28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
|
||||||
addi r7,r7,0x1000
|
addi r7,r7,0x1000
|
||||||
bdnz 28b
|
bdnz 28b
|
||||||
b 30f
|
ptesync
|
||||||
29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */
|
|
||||||
addi r7,r7,0x1000
|
|
||||||
bdnz 29b
|
|
||||||
30: ptesync
|
|
||||||
23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
|
23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
|
||||||
andc r7,r7,r8
|
andc r7,r7,r8
|
||||||
stdcx. r7,0,r6
|
stdcx. r7,0,r6
|
||||||
|
@ -799,7 +796,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
|
||||||
/*
|
/*
|
||||||
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
||||||
*/
|
*/
|
||||||
bl kvmppc_restore_tm
|
mr r3, r4
|
||||||
|
ld r4, VCPU_MSR(r3)
|
||||||
|
bl kvmppc_restore_tm_hv
|
||||||
|
ld r4, HSTATE_KVM_VCPU(r13)
|
||||||
91:
|
91:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -1783,7 +1783,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
|
||||||
/*
|
/*
|
||||||
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
||||||
*/
|
*/
|
||||||
bl kvmppc_save_tm
|
mr r3, r9
|
||||||
|
ld r4, VCPU_MSR(r3)
|
||||||
|
bl kvmppc_save_tm_hv
|
||||||
|
ld r9, HSTATE_KVM_VCPU(r13)
|
||||||
91:
|
91:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -2686,8 +2689,9 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
|
||||||
/*
|
/*
|
||||||
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
||||||
*/
|
*/
|
||||||
ld r9, HSTATE_KVM_VCPU(r13)
|
ld r3, HSTATE_KVM_VCPU(r13)
|
||||||
bl kvmppc_save_tm
|
ld r4, VCPU_MSR(r3)
|
||||||
|
bl kvmppc_save_tm_hv
|
||||||
91:
|
91:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -2805,7 +2809,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
|
||||||
/*
|
/*
|
||||||
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
|
||||||
*/
|
*/
|
||||||
bl kvmppc_restore_tm
|
mr r3, r4
|
||||||
|
ld r4, VCPU_MSR(r3)
|
||||||
|
bl kvmppc_restore_tm_hv
|
||||||
|
ld r4, HSTATE_KVM_VCPU(r13)
|
||||||
91:
|
91:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -3126,11 +3133,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
||||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
/*
|
/*
|
||||||
* Save transactional state and TM-related registers.
|
* Save transactional state and TM-related registers.
|
||||||
* Called with r9 pointing to the vcpu struct.
|
* Called with r3 pointing to the vcpu struct and r4 containing
|
||||||
|
* the guest MSR value.
|
||||||
* This can modify all checkpointed registers, but
|
* This can modify all checkpointed registers, but
|
||||||
* restores r1, r2 and r9 (vcpu pointer) before exit.
|
* restores r1 and r2 before exit.
|
||||||
*/
|
*/
|
||||||
kvmppc_save_tm:
|
kvmppc_save_tm_hv:
|
||||||
|
/* See if we need to handle fake suspend mode */
|
||||||
|
BEGIN_FTR_SECTION
|
||||||
|
b __kvmppc_save_tm
|
||||||
|
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
|
||||||
|
|
||||||
|
lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
|
||||||
|
cmpwi r0, 0
|
||||||
|
beq __kvmppc_save_tm
|
||||||
|
|
||||||
|
/* The following code handles the fake_suspend = 1 case */
|
||||||
mflr r0
|
mflr r0
|
||||||
std r0, PPC_LR_STKOFF(r1)
|
std r0, PPC_LR_STKOFF(r1)
|
||||||
stdu r1, -PPC_MIN_STKFRM(r1)
|
stdu r1, -PPC_MIN_STKFRM(r1)
|
||||||
|
@ -3141,59 +3159,37 @@ kvmppc_save_tm:
|
||||||
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
|
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
|
||||||
mtmsrd r8
|
mtmsrd r8
|
||||||
|
|
||||||
ld r5, VCPU_MSR(r9)
|
|
||||||
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
|
|
||||||
beq 1f /* TM not active in guest. */
|
|
||||||
|
|
||||||
std r1, HSTATE_HOST_R1(r13)
|
|
||||||
li r3, TM_CAUSE_KVM_RESCHED
|
|
||||||
|
|
||||||
BEGIN_FTR_SECTION
|
|
||||||
lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
|
|
||||||
cmpwi r0, 0
|
|
||||||
beq 3f
|
|
||||||
rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
|
rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
|
||||||
beq 4f
|
beq 4f
|
||||||
BEGIN_FTR_SECTION_NESTED(96)
|
BEGIN_FTR_SECTION
|
||||||
bl pnv_power9_force_smt4_catch
|
bl pnv_power9_force_smt4_catch
|
||||||
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
|
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
|
||||||
nop
|
nop
|
||||||
b 6f
|
|
||||||
3:
|
|
||||||
/* Emulation of the treclaim instruction needs TEXASR before treclaim */
|
|
||||||
mfspr r6, SPRN_TEXASR
|
|
||||||
std r6, VCPU_ORIG_TEXASR(r9)
|
|
||||||
6:
|
|
||||||
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
|
|
||||||
|
|
||||||
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
|
std r1, HSTATE_HOST_R1(r13)
|
||||||
|
|
||||||
|
/* Clear the MSR RI since r1, r13 may be foobar. */
|
||||||
li r5, 0
|
li r5, 0
|
||||||
mtmsrd r5, 1
|
mtmsrd r5, 1
|
||||||
|
|
||||||
/* All GPRs are volatile at this point. */
|
/* We have to treclaim here because that's the only way to do S->N */
|
||||||
|
li r3, TM_CAUSE_KVM_RESCHED
|
||||||
TRECLAIM(R3)
|
TRECLAIM(R3)
|
||||||
|
|
||||||
/* Temporarily store r13 and r9 so we have some regs to play with */
|
|
||||||
SET_SCRATCH0(r13)
|
|
||||||
GET_PACA(r13)
|
|
||||||
std r9, PACATMSCRATCH(r13)
|
|
||||||
|
|
||||||
/* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */
|
|
||||||
BEGIN_FTR_SECTION
|
|
||||||
lbz r9, HSTATE_FAKE_SUSPEND(r13)
|
|
||||||
cmpwi r9, 0
|
|
||||||
beq 2f
|
|
||||||
/*
|
/*
|
||||||
* We were in fake suspend, so we are not going to save the
|
* We were in fake suspend, so we are not going to save the
|
||||||
* register state as the guest checkpointed state (since
|
* register state as the guest checkpointed state (since
|
||||||
* we already have it), therefore we can now use any volatile GPR.
|
* we already have it), therefore we can now use any volatile GPR.
|
||||||
*/
|
*/
|
||||||
/* Reload stack pointer and TOC. */
|
/* Reload PACA pointer, stack pointer and TOC. */
|
||||||
|
GET_PACA(r13)
|
||||||
ld r1, HSTATE_HOST_R1(r13)
|
ld r1, HSTATE_HOST_R1(r13)
|
||||||
ld r2, PACATOC(r13)
|
ld r2, PACATOC(r13)
|
||||||
|
|
||||||
/* Set MSR RI now we have r1 and r13 back. */
|
/* Set MSR RI now we have r1 and r13 back. */
|
||||||
li r5, MSR_RI
|
li r5, MSR_RI
|
||||||
mtmsrd r5, 1
|
mtmsrd r5, 1
|
||||||
|
|
||||||
HMT_MEDIUM
|
HMT_MEDIUM
|
||||||
ld r6, HSTATE_DSCR(r13)
|
ld r6, HSTATE_DSCR(r13)
|
||||||
mtspr SPRN_DSCR, r6
|
mtspr SPRN_DSCR, r6
|
||||||
|
@ -3208,85 +3204,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
|
||||||
li r0, PSSCR_FAKE_SUSPEND
|
li r0, PSSCR_FAKE_SUSPEND
|
||||||
andc r3, r3, r0
|
andc r3, r3, r0
|
||||||
mtspr SPRN_PSSCR, r3
|
mtspr SPRN_PSSCR, r3
|
||||||
ld r9, HSTATE_KVM_VCPU(r13)
|
|
||||||
/* Don't save TEXASR, use value from last exit in real suspend state */
|
/* Don't save TEXASR, use value from last exit in real suspend state */
|
||||||
b 11f
|
|
||||||
2:
|
|
||||||
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
|
|
||||||
|
|
||||||
ld r9, HSTATE_KVM_VCPU(r13)
|
ld r9, HSTATE_KVM_VCPU(r13)
|
||||||
|
|
||||||
/* Get a few more GPRs free. */
|
|
||||||
std r29, VCPU_GPRS_TM(29)(r9)
|
|
||||||
std r30, VCPU_GPRS_TM(30)(r9)
|
|
||||||
std r31, VCPU_GPRS_TM(31)(r9)
|
|
||||||
|
|
||||||
/* Save away PPR and DSCR soon so don't run with user values. */
|
|
||||||
mfspr r31, SPRN_PPR
|
|
||||||
HMT_MEDIUM
|
|
||||||
mfspr r30, SPRN_DSCR
|
|
||||||
ld r29, HSTATE_DSCR(r13)
|
|
||||||
mtspr SPRN_DSCR, r29
|
|
||||||
|
|
||||||
/* Save all but r9, r13 & r29-r31 */
|
|
||||||
reg = 0
|
|
||||||
.rept 29
|
|
||||||
.if (reg != 9) && (reg != 13)
|
|
||||||
std reg, VCPU_GPRS_TM(reg)(r9)
|
|
||||||
.endif
|
|
||||||
reg = reg + 1
|
|
||||||
.endr
|
|
||||||
/* ... now save r13 */
|
|
||||||
GET_SCRATCH0(r4)
|
|
||||||
std r4, VCPU_GPRS_TM(13)(r9)
|
|
||||||
/* ... and save r9 */
|
|
||||||
ld r4, PACATMSCRATCH(r13)
|
|
||||||
std r4, VCPU_GPRS_TM(9)(r9)
|
|
||||||
|
|
||||||
/* Reload stack pointer and TOC. */
|
|
||||||
ld r1, HSTATE_HOST_R1(r13)
|
|
||||||
ld r2, PACATOC(r13)
|
|
||||||
|
|
||||||
/* Set MSR RI now we have r1 and r13 back. */
|
|
||||||
li r5, MSR_RI
|
|
||||||
mtmsrd r5, 1
|
|
||||||
|
|
||||||
/* Save away checkpinted SPRs. */
|
|
||||||
std r31, VCPU_PPR_TM(r9)
|
|
||||||
std r30, VCPU_DSCR_TM(r9)
|
|
||||||
mflr r5
|
|
||||||
mfcr r6
|
|
||||||
mfctr r7
|
|
||||||
mfspr r8, SPRN_AMR
|
|
||||||
mfspr r10, SPRN_TAR
|
|
||||||
mfxer r11
|
|
||||||
std r5, VCPU_LR_TM(r9)
|
|
||||||
stw r6, VCPU_CR_TM(r9)
|
|
||||||
std r7, VCPU_CTR_TM(r9)
|
|
||||||
std r8, VCPU_AMR_TM(r9)
|
|
||||||
std r10, VCPU_TAR_TM(r9)
|
|
||||||
std r11, VCPU_XER_TM(r9)
|
|
||||||
|
|
||||||
/* Restore r12 as trap number. */
|
|
||||||
lwz r12, VCPU_TRAP(r9)
|
|
||||||
|
|
||||||
/* Save FP/VSX. */
|
|
||||||
addi r3, r9, VCPU_FPRS_TM
|
|
||||||
bl store_fp_state
|
|
||||||
addi r3, r9, VCPU_VRS_TM
|
|
||||||
bl store_vr_state
|
|
||||||
mfspr r6, SPRN_VRSAVE
|
|
||||||
stw r6, VCPU_VRSAVE_TM(r9)
|
|
||||||
1:
|
|
||||||
/*
|
|
||||||
* We need to save these SPRs after the treclaim so that the software
|
|
||||||
* error code is recorded correctly in the TEXASR. Also the user may
|
|
||||||
* change these outside of a transaction, so they must always be
|
|
||||||
* context switched.
|
|
||||||
*/
|
|
||||||
mfspr r7, SPRN_TEXASR
|
|
||||||
std r7, VCPU_TEXASR(r9)
|
|
||||||
11:
|
|
||||||
mfspr r5, SPRN_TFHAR
|
mfspr r5, SPRN_TFHAR
|
||||||
mfspr r6, SPRN_TFIAR
|
mfspr r6, SPRN_TFIAR
|
||||||
std r5, VCPU_TFHAR(r9)
|
std r5, VCPU_TFHAR(r9)
|
||||||
|
@ -3299,149 +3219,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Restore transactional state and TM-related registers.
|
* Restore transactional state and TM-related registers.
|
||||||
* Called with r4 pointing to the vcpu struct.
|
* Called with r3 pointing to the vcpu struct
|
||||||
|
* and r4 containing the guest MSR value.
|
||||||
* This potentially modifies all checkpointed registers.
|
* This potentially modifies all checkpointed registers.
|
||||||
* It restores r1, r2, r4 from the PACA.
|
* It restores r1 and r2 from the PACA.
|
||||||
*/
|
*/
|
||||||
kvmppc_restore_tm:
|
kvmppc_restore_tm_hv:
|
||||||
mflr r0
|
|
||||||
std r0, PPC_LR_STKOFF(r1)
|
|
||||||
|
|
||||||
/* Turn on TM/FP/VSX/VMX so we can restore them. */
|
|
||||||
mfmsr r5
|
|
||||||
li r6, MSR_TM >> 32
|
|
||||||
sldi r6, r6, 32
|
|
||||||
or r5, r5, r6
|
|
||||||
ori r5, r5, MSR_FP
|
|
||||||
oris r5, r5, (MSR_VEC | MSR_VSX)@h
|
|
||||||
mtmsrd r5
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The user may change these outside of a transaction, so they must
|
|
||||||
* always be context switched.
|
|
||||||
*/
|
|
||||||
ld r5, VCPU_TFHAR(r4)
|
|
||||||
ld r6, VCPU_TFIAR(r4)
|
|
||||||
ld r7, VCPU_TEXASR(r4)
|
|
||||||
mtspr SPRN_TFHAR, r5
|
|
||||||
mtspr SPRN_TFIAR, r6
|
|
||||||
mtspr SPRN_TEXASR, r7
|
|
||||||
|
|
||||||
li r0, 0
|
|
||||||
stb r0, HSTATE_FAKE_SUSPEND(r13)
|
|
||||||
ld r5, VCPU_MSR(r4)
|
|
||||||
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
|
|
||||||
beqlr /* TM not active in guest */
|
|
||||||
std r1, HSTATE_HOST_R1(r13)
|
|
||||||
|
|
||||||
/* Make sure the failure summary is set, otherwise we'll program check
|
|
||||||
* when we trechkpt. It's possible that this might have been not set
|
|
||||||
* on a kvmppc_set_one_reg() call but we shouldn't let this crash the
|
|
||||||
* host.
|
|
||||||
*/
|
|
||||||
oris r7, r7, (TEXASR_FS)@h
|
|
||||||
mtspr SPRN_TEXASR, r7
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we are doing TM emulation for the guest on a POWER9 DD2,
|
* If we are doing TM emulation for the guest on a POWER9 DD2,
|
||||||
* then we don't actually do a trechkpt -- we either set up
|
* then we don't actually do a trechkpt -- we either set up
|
||||||
* fake-suspend mode, or emulate a TM rollback.
|
* fake-suspend mode, or emulate a TM rollback.
|
||||||
*/
|
*/
|
||||||
BEGIN_FTR_SECTION
|
BEGIN_FTR_SECTION
|
||||||
b .Ldo_tm_fake_load
|
b __kvmppc_restore_tm
|
||||||
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
|
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
|
||||||
|
mflr r0
|
||||||
|
std r0, PPC_LR_STKOFF(r1)
|
||||||
|
|
||||||
|
li r0, 0
|
||||||
|
stb r0, HSTATE_FAKE_SUSPEND(r13)
|
||||||
|
|
||||||
|
/* Turn on TM so we can restore TM SPRs */
|
||||||
|
mfmsr r5
|
||||||
|
li r0, 1
|
||||||
|
rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
|
||||||
|
mtmsrd r5
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We need to load up the checkpointed state for the guest.
|
* The user may change these outside of a transaction, so they must
|
||||||
* We need to do this early as it will blow away any GPRs, VSRs and
|
* always be context switched.
|
||||||
* some SPRs.
|
|
||||||
*/
|
*/
|
||||||
|
ld r5, VCPU_TFHAR(r3)
|
||||||
|
ld r6, VCPU_TFIAR(r3)
|
||||||
|
ld r7, VCPU_TEXASR(r3)
|
||||||
|
mtspr SPRN_TFHAR, r5
|
||||||
|
mtspr SPRN_TFIAR, r6
|
||||||
|
mtspr SPRN_TEXASR, r7
|
||||||
|
|
||||||
mr r31, r4
|
rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
|
||||||
addi r3, r31, VCPU_FPRS_TM
|
beqlr /* TM not active in guest */
|
||||||
bl load_fp_state
|
|
||||||
addi r3, r31, VCPU_VRS_TM
|
|
||||||
bl load_vr_state
|
|
||||||
mr r4, r31
|
|
||||||
lwz r7, VCPU_VRSAVE_TM(r4)
|
|
||||||
mtspr SPRN_VRSAVE, r7
|
|
||||||
|
|
||||||
ld r5, VCPU_LR_TM(r4)
|
/* Make sure the failure summary is set */
|
||||||
lwz r6, VCPU_CR_TM(r4)
|
oris r7, r7, (TEXASR_FS)@h
|
||||||
ld r7, VCPU_CTR_TM(r4)
|
mtspr SPRN_TEXASR, r7
|
||||||
ld r8, VCPU_AMR_TM(r4)
|
|
||||||
ld r9, VCPU_TAR_TM(r4)
|
|
||||||
ld r10, VCPU_XER_TM(r4)
|
|
||||||
mtlr r5
|
|
||||||
mtcr r6
|
|
||||||
mtctr r7
|
|
||||||
mtspr SPRN_AMR, r8
|
|
||||||
mtspr SPRN_TAR, r9
|
|
||||||
mtxer r10
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Load up PPR and DSCR values but don't put them in the actual SPRs
|
|
||||||
* till the last moment to avoid running with userspace PPR and DSCR for
|
|
||||||
* too long.
|
|
||||||
*/
|
|
||||||
ld r29, VCPU_DSCR_TM(r4)
|
|
||||||
ld r30, VCPU_PPR_TM(r4)
|
|
||||||
|
|
||||||
std r2, PACATMSCRATCH(r13) /* Save TOC */
|
|
||||||
|
|
||||||
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
|
|
||||||
li r5, 0
|
|
||||||
mtmsrd r5, 1
|
|
||||||
|
|
||||||
/* Load GPRs r0-r28 */
|
|
||||||
reg = 0
|
|
||||||
.rept 29
|
|
||||||
ld reg, VCPU_GPRS_TM(reg)(r31)
|
|
||||||
reg = reg + 1
|
|
||||||
.endr
|
|
||||||
|
|
||||||
mtspr SPRN_DSCR, r29
|
|
||||||
mtspr SPRN_PPR, r30
|
|
||||||
|
|
||||||
/* Load final GPRs */
|
|
||||||
ld 29, VCPU_GPRS_TM(29)(r31)
|
|
||||||
ld 30, VCPU_GPRS_TM(30)(r31)
|
|
||||||
ld 31, VCPU_GPRS_TM(31)(r31)
|
|
||||||
|
|
||||||
/* TM checkpointed state is now setup. All GPRs are now volatile. */
|
|
||||||
TRECHKPT
|
|
||||||
|
|
||||||
/* Now let's get back the state we need. */
|
|
||||||
HMT_MEDIUM
|
|
||||||
GET_PACA(r13)
|
|
||||||
ld r29, HSTATE_DSCR(r13)
|
|
||||||
mtspr SPRN_DSCR, r29
|
|
||||||
ld r4, HSTATE_KVM_VCPU(r13)
|
|
||||||
ld r1, HSTATE_HOST_R1(r13)
|
|
||||||
ld r2, PACATMSCRATCH(r13)
|
|
||||||
|
|
||||||
/* Set the MSR RI since we have our registers back. */
|
|
||||||
li r5, MSR_RI
|
|
||||||
mtmsrd r5, 1
|
|
||||||
9:
|
|
||||||
ld r0, PPC_LR_STKOFF(r1)
|
|
||||||
mtlr r0
|
|
||||||
blr
|
|
||||||
|
|
||||||
.Ldo_tm_fake_load:
|
|
||||||
cmpwi r5, 1 /* check for suspended state */
|
cmpwi r5, 1 /* check for suspended state */
|
||||||
bgt 10f
|
bgt 10f
|
||||||
stb r5, HSTATE_FAKE_SUSPEND(r13)
|
stb r5, HSTATE_FAKE_SUSPEND(r13)
|
||||||
b 9b /* and return */
|
b 9f /* and return */
|
||||||
10: stdu r1, -PPC_MIN_STKFRM(r1)
|
10: stdu r1, -PPC_MIN_STKFRM(r1)
|
||||||
/* guest is in transactional state, so simulate rollback */
|
/* guest is in transactional state, so simulate rollback */
|
||||||
mr r3, r4
|
|
||||||
bl kvmhv_emulate_tm_rollback
|
bl kvmhv_emulate_tm_rollback
|
||||||
nop
|
nop
|
||||||
ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
|
|
||||||
addi r1, r1, PPC_MIN_STKFRM
|
addi r1, r1, PPC_MIN_STKFRM
|
||||||
b 9b
|
9: ld r0, PPC_LR_STKOFF(r1)
|
||||||
#endif
|
mtlr r0
|
||||||
|
blr
|
||||||
|
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We come here if we get any exception or interrupt while we are
|
* We come here if we get any exception or interrupt while we are
|
||||||
|
@ -3572,6 +3406,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
|
||||||
bcl 20, 31, .+4
|
bcl 20, 31, .+4
|
||||||
5: mflr r3
|
5: mflr r3
|
||||||
addi r3, r3, 9f - 5b
|
addi r3, r3, 9f - 5b
|
||||||
|
li r4, -1
|
||||||
|
rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
|
||||||
ld r4, PACAKMSR(r13)
|
ld r4, PACAKMSR(r13)
|
||||||
mtspr SPRN_SRR0, r3
|
mtspr SPRN_SRR0, r3
|
||||||
mtspr SPRN_SRR1, r4
|
mtspr SPRN_SRR1, r4
|
||||||
|
|
|
@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
 	u64 texasr, tfiar;
 	u64 msr = vcpu->arch.shregs.msr;
 
-	tfiar = vcpu->arch.pc & ~0x3ull;
+	tfiar = vcpu->arch.regs.nip & ~0x3ull;
 	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
 	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
 		texasr |= TEXASR_SUSP;
@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
               (newmsr & MSR_TM)));
        newmsr = sanitize_msr(newmsr);
        vcpu->arch.shregs.msr = newmsr;
        vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
        vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
        return RESUME_GUEST;

    case PPC_INST_RFEBB:
@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        vcpu->arch.bescr = bescr;
        msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
        vcpu->arch.shregs.msr = msr;
        vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
        vcpu->arch.regs.nip = vcpu->arch.ebbrr;
        return RESUME_GUEST;

    case PPC_INST_MTMSRD:
@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
            return 0;
        newmsr = sanitize_msr(newmsr);
        vcpu->arch.shregs.msr = newmsr;
        vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
        vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
        return 1;

    case PPC_INST_RFEBB:
@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
        mtspr(SPRN_BESCR, bescr);
        msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
        vcpu->arch.shregs.msr = msr;
        vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
        vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
        return 1;

    case PPC_INST_MTMSRD:
@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
    vcpu->arch.shregs.msr &= ~MSR_TS_MASK;  /* go to N state */
    vcpu->arch.regs.nip = vcpu->arch.tfhar;
    copy_from_checkpoint(vcpu);
    vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
}
@ -42,6 +42,8 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"
|
@ -53,7 +55,9 @@
|
||||||
|
|
||||||
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
|
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
|
||||||
ulong msr);
|
ulong msr);
|
||||||
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
|
#ifdef CONFIG_PPC_BOOK3S_64
|
||||||
|
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Some compatibility defines */
|
/* Some compatibility defines */
|
||||||
#ifdef CONFIG_PPC_BOOK3S_32
|
#ifdef CONFIG_PPC_BOOK3S_32
|
||||||
|
@ -114,6 +118,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)

    if (kvmppc_is_split_real(vcpu))
        kvmppc_fixup_split_real(vcpu);

    kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
@ -133,6 +139,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)

    kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
    kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
    kvmppc_save_tm_pr(vcpu);

    /* Enable AIL if supported */
    if (cpu_has_feature(CPU_FTR_HVMODE) &&
@ -147,25 +154,25 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
    struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

    svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
    svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
    svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
    svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
    svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
    svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
    svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
    svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
    svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
    svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
    svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
    svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
    svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
    svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
    svcpu->cr  = vcpu->arch.cr;
    svcpu->xer = vcpu->arch.regs.xer;
    svcpu->ctr = vcpu->arch.regs.ctr;
    svcpu->lr  = vcpu->arch.regs.link;
    svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
    svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
@ -182,10 +189,45 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
    svcpu_put(svcpu);
}

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
    ulong guest_msr = kvmppc_get_msr(vcpu);
    ulong smsr = guest_msr;

    /* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
        MSR_TM | MSR_TS_MASK;
#else
    smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
    /* Process MSR values */
    smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
    /* External providers the guest reserved */
    smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
    /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
    smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    /*
     * in guest privileged state, we want to fail all TM transactions.
     * So disable MSR TM bit so that all tbegin. will be able to be
     * trapped into host.
     */
    if (!(guest_msr & MSR_PR))
        smsr &= ~MSR_TM;
#endif
    vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
    struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    ulong old_msr;
#endif

    /*
     * Maybe we were already preempted and synced the svcpu from
@ -194,25 +236,25 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
    if (!svcpu->in_use)
        goto out;

    vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
    vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
    vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
    vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
    vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
    vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
    vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
    vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
    vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
    vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
    vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
    vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
    vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
    vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
    vcpu->arch.cr  = svcpu->cr;
    vcpu->arch.regs.xer = svcpu->xer;
    vcpu->arch.regs.ctr = svcpu->ctr;
    vcpu->arch.regs.link  = svcpu->lr;
    vcpu->arch.regs.nip  = svcpu->pc;
    vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
    vcpu->arch.fault_dar   = svcpu->fault_dar;
    vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
@ -228,12 +270,116 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
    to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
    if (cpu_has_feature(CPU_FTR_ARCH_207S))
        vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    /*
     * Unlike other MSR bits, MSR[TS]bits can be changed at guest without
     * notifying host:
     *  modified by unprivileged instructions like "tbegin"/"tend"/
     * "tresume"/"tsuspend" in PR KVM guest.
     *
     * It is necessary to sync here to calculate a correct shadow_msr.
     *
     * privileged guest's tbegin will be failed at present. So we
     * only take care of problem state guest.
     */
    old_msr = kvmppc_get_msr(vcpu);
    if (unlikely((old_msr & MSR_PR) &&
        (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
                (old_msr & (MSR_TS_MASK)))) {
        old_msr &= ~(MSR_TS_MASK);
        old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
        kvmppc_set_msr_fast(vcpu, old_msr);
        kvmppc_recalc_shadow_msr(vcpu);
    }
#endif

    svcpu->in_use = false;

out:
    svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
    tm_enable();
    vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
    vcpu->arch.texasr = mfspr(SPRN_TEXASR);
    vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
    tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
    tm_enable();
    mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
    mtspr(SPRN_TEXASR, vcpu->arch.texasr);
    mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
    tm_disable();
}

/* loadup math bits which is enabled at kvmppc_get_msr() but not enabled at
 * hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
    ulong exit_nr;
    ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
        (MSR_FP | MSR_VEC | MSR_VSX);

    if (!ext_diff)
        return;

    if (ext_diff == MSR_FP)
        exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
    else if (ext_diff == MSR_VEC)
        exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
    else
        exit_nr = BOOK3S_INTERRUPT_VSX;

    kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
    if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
        kvmppc_save_tm_sprs(vcpu);
        return;
    }

    kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
    kvmppc_giveup_ext(vcpu, MSR_VSX);

    preempt_disable();
    _kvmppc_save_tm_pr(vcpu, mfmsr());
    preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
    if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
        kvmppc_restore_tm_sprs(vcpu);
        if (kvmppc_get_msr(vcpu) & MSR_TM) {
            kvmppc_handle_lost_math_exts(vcpu);
            if (vcpu->arch.fscr & FSCR_TAR)
                kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
        }
        return;
    }

    preempt_disable();
    _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
    preempt_enable();

    if (kvmppc_get_msr(vcpu) & MSR_TM) {
        kvmppc_handle_lost_math_exts(vcpu);
        if (vcpu->arch.fscr & FSCR_TAR)
            kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
    }
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
    int r = 1; /* Indicate we want to get back into the guest */
@ -306,32 +452,29 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)

/*****************************************/

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
    ulong old_msr;

    /* For PAPR guest, make sure MSR reflects guest mode */
    if (vcpu->arch.papr_enabled)
        msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
    printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    /* We should never target guest MSR to TS=10 && PR=0,
     * since we always fail transaction for guest privilege
     * state.
     */
    if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
        kvmppc_emulate_tabort(vcpu,
            TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

    old_msr = kvmppc_get_msr(vcpu);
    msr &= to_book3s(vcpu)->msr_mask;
    kvmppc_set_msr_fast(vcpu, msr);
    kvmppc_recalc_shadow_msr(vcpu);
@ -387,6 +530,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
    /* Preload FPU if it's enabled */
    if (kvmppc_get_msr(vcpu) & MSR_FP)
        kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    if (kvmppc_get_msr(vcpu) & MSR_TM)
        kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
@ -584,24 +732,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        pte.may_execute = !data;
    }

    if (page_found == -ENOENT || page_found == -EPERM) {
        /* Page not found in guest PTE entries, or protection fault */
        u64 flags;

        if (page_found == -EPERM)
            flags = DSISR_PROTFAULT;
        else
            flags = DSISR_NOHPTE;
        if (data) {
            flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
            kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
        } else {
            kvmppc_core_queue_inst_storage(vcpu, flags);
        }
    } else if (page_found == -EINVAL) {
        /* Page not found in guest SLB */
        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
@ -683,7 +827,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
    if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
@ -802,7 +946,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
    /* Inject the Interrupt Cause field and trigger a guest interrupt */
    vcpu->arch.fscr &= ~(0xffULL << 56);
@ -864,6 +1008,18 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
        break;
    }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    /* Since we disabled MSR_TM at privilege state, the mfspr instruction
     * for TM spr can trigger TM fac unavailable. In this case, the
     * emulation is handled by kvmppc_emulate_fac(), which invokes
     * kvmppc_emulate_mfspr() finally. But note the mfspr can include
     * RT for NV registers. So it need to restore those NV reg to reflect
     * the update.
     */
    if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
        return RESUME_GUEST_NV;
#endif

    return RESUME_GUEST;
}
@ -872,7 +1028,12 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
    if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
        /* TAR got dropped, drop it in shadow too */
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
    } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
        vcpu->arch.fscr = fscr;
        kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
        return;
    }

    vcpu->arch.fscr = fscr;
}
#endif
@ -1017,10 +1178,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
            kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
            r = RESUME_GUEST;
        } else {
            kvmppc_core_queue_inst_storage(vcpu,
                shadow_srr1 & 0x58000000);
            r = RESUME_GUEST;
        }
        break;
@ -1059,9 +1218,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
            r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
            srcu_read_unlock(&vcpu->kvm->srcu, idx);
        } else {
            kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
            r = RESUME_GUEST;
        }
        break;
@ -1092,10 +1249,13 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
    case BOOK3S_INTERRUPT_EXTERNAL:
    case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
    case BOOK3S_INTERRUPT_EXTERNAL_HV:
    case BOOK3S_INTERRUPT_H_VIRT:
        vcpu->stat.ext_intr_exits++;
        r = RESUME_GUEST;
        break;
    case BOOK3S_INTERRUPT_HMI:
    case BOOK3S_INTERRUPT_PERFMON:
    case BOOK3S_INTERRUPT_SYSTEM_RESET:
        r = RESUME_GUEST;
        break;
    case BOOK3S_INTERRUPT_PROGRAM:
@ -1225,8 +1385,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
    }
#ifdef CONFIG_PPC_BOOK3S_64
    case BOOK3S_INTERRUPT_FAC_UNAVAIL:
        r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
        break;
#endif
    case BOOK3S_INTERRUPT_MACHINE_CHECK:
@ -1379,6 +1538,73 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
        else
            *val = get_reg_val(id, 0);
        break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    case KVM_REG_PPC_TFHAR:
        *val = get_reg_val(id, vcpu->arch.tfhar);
        break;
    case KVM_REG_PPC_TFIAR:
        *val = get_reg_val(id, vcpu->arch.tfiar);
        break;
    case KVM_REG_PPC_TEXASR:
        *val = get_reg_val(id, vcpu->arch.texasr);
        break;
    case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
        *val = get_reg_val(id,
                vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
        break;
    case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
    {
        int i, j;

        i = id - KVM_REG_PPC_TM_VSR0;
        if (i < 32)
            for (j = 0; j < TS_FPRWIDTH; j++)
                val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
        else {
            if (cpu_has_feature(CPU_FTR_ALTIVEC))
                val->vval = vcpu->arch.vr_tm.vr[i-32];
            else
                r = -ENXIO;
        }
        break;
    }
    case KVM_REG_PPC_TM_CR:
        *val = get_reg_val(id, vcpu->arch.cr_tm);
        break;
    case KVM_REG_PPC_TM_XER:
        *val = get_reg_val(id, vcpu->arch.xer_tm);
        break;
    case KVM_REG_PPC_TM_LR:
        *val = get_reg_val(id, vcpu->arch.lr_tm);
        break;
    case KVM_REG_PPC_TM_CTR:
        *val = get_reg_val(id, vcpu->arch.ctr_tm);
        break;
    case KVM_REG_PPC_TM_FPSCR:
        *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
        break;
    case KVM_REG_PPC_TM_AMR:
        *val = get_reg_val(id, vcpu->arch.amr_tm);
        break;
    case KVM_REG_PPC_TM_PPR:
        *val = get_reg_val(id, vcpu->arch.ppr_tm);
        break;
    case KVM_REG_PPC_TM_VRSAVE:
        *val = get_reg_val(id, vcpu->arch.vrsave_tm);
        break;
    case KVM_REG_PPC_TM_VSCR:
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
            *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
        else
            r = -ENXIO;
        break;
    case KVM_REG_PPC_TM_DSCR:
        *val = get_reg_val(id, vcpu->arch.dscr_tm);
        break;
    case KVM_REG_PPC_TM_TAR:
        *val = get_reg_val(id, vcpu->arch.tar_tm);
        break;
#endif
    default:
        r = -EINVAL;
        break;
@ -1412,6 +1638,72 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
    case KVM_REG_PPC_LPCR_64:
        kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
        break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    case KVM_REG_PPC_TFHAR:
        vcpu->arch.tfhar = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TFIAR:
        vcpu->arch.tfiar = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TEXASR:
        vcpu->arch.texasr = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
        vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
            set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
    {
        int i, j;

        i = id - KVM_REG_PPC_TM_VSR0;
        if (i < 32)
            for (j = 0; j < TS_FPRWIDTH; j++)
                vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
        else
            if (cpu_has_feature(CPU_FTR_ALTIVEC))
                vcpu->arch.vr_tm.vr[i-32] = val->vval;
            else
                r = -ENXIO;
        break;
    }
    case KVM_REG_PPC_TM_CR:
        vcpu->arch.cr_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_XER:
        vcpu->arch.xer_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_LR:
        vcpu->arch.lr_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_CTR:
        vcpu->arch.ctr_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_FPSCR:
        vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_AMR:
        vcpu->arch.amr_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_PPR:
        vcpu->arch.ppr_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_VRSAVE:
        vcpu->arch.vrsave_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_VSCR:
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
            vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
        else
            r = -ENXIO;
        break;
    case KVM_REG_PPC_TM_DSCR:
        vcpu->arch.dscr_tm = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_TM_TAR:
        vcpu->arch.tar_tm = set_reg_val(id, *val);
        break;
#endif
    default:
        r = -EINVAL;
        break;
@ -1687,6 +1979,17 @@ static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,

    return 0;
}

static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
    if (!cpu_has_feature(CPU_FTR_ARCH_300))
        return -ENODEV;
    /* Require flags and process table base and size to all be zero. */
    if (cfg->flags || cfg->process_table)
        return -EINVAL;
    return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                     struct kvm_ppc_smmu_info *info)
@ -1735,9 +2038,12 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
static int kvmppc_core_check_processor_compat_pr(void)
{
    /*
     * PR KVM can work on POWER9 inside a guest partition
     * running in HPT mode. It can't work if we are using
     * radix translation (because radix provides no way for
     * a process to have unique translations in quadrant 3).
     */
    if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
        return -EIO;
    return 0;
}
@ -1781,7 +2087,9 @@ static struct kvmppc_ops kvm_ops_pr = {
    .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
    .hcall_implemented = kvmppc_hcall_impl_pr,
    .configure_mmu = kvm_configure_mmu_pr,
#endif
    .giveup_ext = kvmppc_giveup_ext,
};
@ -383,6 +383,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
     */

    PPC_LL  r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    /*
     * We don't want to change MSR[TS] bits via rfi here.
     * The actual TM handling logic will be in host with
     * recovered DR/IR bits after HSTATE_VMHANDLER.
     * And MSR_TM can be enabled in HOST_MSR so rfid may
     * not suppress this change and can lead to exception.
     * Manually set MSR to prevent TS state change here.
     */
    mfmsr   r7
    rldicl  r7, r7, 64 - MSR_TS_S_LG, 62
    rldimi  r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
#endif
    PPC_LL  r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
@ -334,7 +334,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
     */

    /* Return interrupt and old CPPR in GPR4 */
    vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

    return H_SUCCESS;
}
@ -369,7 +369,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
    hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

    /* Return interrupt and old CPPR in GPR4 */
    vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

    return H_SUCCESS;
}
@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
    int i;

    printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
            vcpu->arch.shared->msr);
    printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
            vcpu->arch.regs.ctr);
    printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
            vcpu->arch.shared->srr1);
@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
    if (allowed) {
        switch (int_class) {
        case INT_CLASS_NONCRIT:
            set_guest_srr(vcpu, vcpu->arch.regs.nip,
                    vcpu->arch.shared->msr);
            break;
        case INT_CLASS_CRIT:
            set_guest_csrr(vcpu, vcpu->arch.regs.nip,
                    vcpu->arch.shared->msr);
            break;
        case INT_CLASS_DBG:
            set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
                    vcpu->arch.shared->msr);
            break;
        case INT_CLASS_MC:
            set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
                    vcpu->arch.shared->msr);
            break;
        }

        vcpu->arch.regs.nip = vcpu->arch.ivpr |
                    vcpu->arch.ivor[priority];
        if (update_esr == true)
            kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
        if (update_dear == true)
@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)

    case EMULATE_FAIL:
        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
               __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
        /* For debugging, encode the failing instruction and
         * report it to userspace. */
        run->hw.hardware_exit_reason = ~0ULL << 32;
@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
     */
    vcpu->arch.dbsr = 0;
    run->debug.arch.status = 0;
    run->debug.arch.address = vcpu->arch.regs.nip;

    if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
        run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,

    case EMULATE_FAIL:
        pr_debug("%s: load instruction from guest address %lx failed\n",
               __func__, vcpu->arch.regs.nip);
        /* For debugging, encode the failing instruction and
         * report it to userspace. */
        run->hw.hardware_exit_reason = ~0ULL << 32;
@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
    case BOOKE_INTERRUPT_SPE_FP_DATA:
    case BOOKE_INTERRUPT_SPE_FP_ROUND:
        printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
               __func__, exit_nr, vcpu->arch.regs.nip);
        run->hw.hardware_exit_reason = exit_nr;
        r = RESUME_HOST;
        break;
@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
    }

    case BOOKE_INTERRUPT_ITLB_MISS: {
        unsigned long eaddr = vcpu->arch.regs.nip;
        gpa_t gpaddr;
        gfn_t gfn;
        int gtlb_index;
@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
    int i;
    int r;

    vcpu->arch.regs.nip = 0;
    vcpu->arch.shared->pir = vcpu->vcpu_id;
    kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
    kvmppc_set_msr(vcpu, 0);
@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)

    vcpu_load(vcpu);

    regs->pc = vcpu->arch.regs.nip;
    regs->cr = kvmppc_get_cr(vcpu);
    regs->ctr = vcpu->arch.regs.ctr;
    regs->lr = vcpu->arch.regs.link;
    regs->xer = kvmppc_get_xer(vcpu);
    regs->msr = vcpu->arch.shared->msr;
    regs->srr0 = kvmppc_get_srr0(vcpu);
@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)

    vcpu_load(vcpu);

    vcpu->arch.regs.nip = regs->pc;
    kvmppc_set_cr(vcpu, regs->cr);
    vcpu->arch.regs.ctr = regs->ctr;
    vcpu->arch.regs.link = regs->lr;
    kvmppc_set_xer(vcpu, regs->xer);
    kvmppc_set_msr(vcpu, regs->msr);
    kvmppc_set_srr0(vcpu, regs->srr0);
@ -34,19 +34,19 @@

static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
    vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
    kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}

static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
{
    vcpu->arch.regs.nip = vcpu->arch.dsrr0;
    kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
}

static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
    vcpu->arch.regs.nip = vcpu->arch.csrr0;
    kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}
@ -53,7 +53,7 @@ static int dbell2prio(ulong param)

static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
    ulong param = vcpu->arch.regs.gpr[rb];
    int prio = dbell2prio(param);

    if (prio < 0)
@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)

static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
    ulong param = vcpu->arch.regs.gpr[rb];
    int prio = dbell2prio(rb);
    int pir = param & PPC_DBELL_PIR_MASK;
    int i;
@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
    switch (get_oc(inst)) {
    case EHPRIV_OC_DEBUG:
        run->exit_reason = KVM_EXIT_DEBUG;
        run->debug.arch.address = vcpu->arch.regs.nip;
        run->debug.arch.status = 0;
        kvmppc_account_exit(vcpu, DEBUG_EXITS);
        emulated = EMULATE_EXIT_USER;
@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
    unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

    kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
@ -625,8 +625,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
}

#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
        enum instruction_fetch_type type, u32 *instr)
{
    gva_t geaddr;
    hpa_t addr;
@ -715,8 +715,8 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
    return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
        enum instruction_fetch_type type, u32 *instr)
{
    return EMULATE_AGAIN;
}
@ -31,6 +31,7 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
@ -84,8 +85,9 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
    struct kvm_run *run = vcpu->run;
    u32 inst;
    int ra, rs, rt;
    enum emulation_result emulated = EMULATE_FAIL;
    int advance = 1;
    struct instruction_op op;

    /* this default type might be overwritten by subcategories */
    kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@ -107,580 +109,276 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
|
||||||
vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
|
vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
|
||||||
vcpu->arch.mmio_vsx_copy_nums = 0;
|
vcpu->arch.mmio_vsx_copy_nums = 0;
|
||||||
vcpu->arch.mmio_vsx_offset = 0;
|
vcpu->arch.mmio_vsx_offset = 0;
|
||||||
vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
|
vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
|
||||||
vcpu->arch.mmio_sp64_extend = 0;
|
vcpu->arch.mmio_sp64_extend = 0;
|
||||||
vcpu->arch.mmio_sign_extend = 0;
|
vcpu->arch.mmio_sign_extend = 0;
|
||||||
vcpu->arch.mmio_vmx_copy_nums = 0;
|
vcpu->arch.mmio_vmx_copy_nums = 0;
|
||||||
|
vcpu->arch.mmio_vmx_offset = 0;
|
||||||
|
vcpu->arch.mmio_host_swabbed = 0;
|
||||||
|
|
||||||
switch (get_op(inst)) {
|
emulated = EMULATE_FAIL;
|
||||||
case 31:
|
vcpu->arch.regs.msr = vcpu->arch.shared->msr;
|
||||||
switch (get_xop(inst)) {
|
vcpu->arch.regs.ccr = vcpu->arch.cr;
|
||||||
case OP_31_XOP_LWZX:
|
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
|
||||||
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
|
int type = op.type & INSTR_TYPE_MASK;
|
||||||
|
int size = GETSIZE(op.type);
|
||||||
|
|
||||||
|
switch (type) {
|
||||||
|
case LOAD: {
|
||||||
|
int instr_byte_swap = op.type & BYTEREV;
|
||||||
|
|
||||||
|
if (op.type & SIGNEXT)
|
||||||
|
emulated = kvmppc_handle_loads(run, vcpu,
|
||||||
|
op.reg, size, !instr_byte_swap);
|
||||||
|
else
|
||||||
|
emulated = kvmppc_handle_load(run, vcpu,
|
||||||
|
op.reg, size, !instr_byte_swap);
|
||||||
|
|
||||||
|
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
|
||||||
|
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
#ifdef CONFIG_PPC_FPU
|
||||||
|
case LOAD_FP:
|
||||||
|
if (kvmppc_check_fp_disabled(vcpu))
|
||||||
|
return EMULATE_DONE;
|
||||||
|
|
||||||
|
if (op.type & FPCONV)
|
||||||
|
vcpu->arch.mmio_sp64_extend = 1;
|
||||||
|
|
||||||
|
if (op.type & SIGNEXT)
|
||||||
|
emulated = kvmppc_handle_loads(run, vcpu,
|
||||||
|
KVM_MMIO_REG_FPR|op.reg, size, 1);
|
||||||
|
else
|
||||||
|
emulated = kvmppc_handle_load(run, vcpu,
|
||||||
|
KVM_MMIO_REG_FPR|op.reg, size, 1);
|
||||||
|
|
||||||
|
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
|
||||||
|
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
|
||||||
|
|
||||||
|
break;
|
||||||
|
#endif
|
||||||
|
#ifdef CONFIG_ALTIVEC
|
||||||
|
case LOAD_VMX:
|
||||||
|
if (kvmppc_check_altivec_disabled(vcpu))
|
||||||
|
return EMULATE_DONE;
|
||||||
|
|
||||||
|
/* Hardware enforces alignment of VMX accesses */
|
||||||
|
vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
|
||||||
|
vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
|
||||||
|
|
||||||
|
if (size == 16) { /* lvx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_DWORD;
|
||||||
|
} else if (size == 4) { /* lvewx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_WORD;
|
||||||
|
} else if (size == 2) { /* lvehx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_HWORD;
|
||||||
|
} else if (size == 1) { /* lvebx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_BYTE;
|
||||||
|
} else
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case OP_31_XOP_LWZUX:
|
vcpu->arch.mmio_vmx_offset =
|
||||||
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
|
(vcpu->arch.vaddr_accessed & 0xf)/size;
|
||||||
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
|
|
||||||
|
if (size == 16) {
|
||||||
|
vcpu->arch.mmio_vmx_copy_nums = 2;
|
||||||
|
emulated = kvmppc_handle_vmx_load(run,
|
||||||
|
vcpu, KVM_MMIO_REG_VMX|op.reg,
|
||||||
|
8, 1);
|
||||||
|
} else {
|
||||||
|
vcpu->arch.mmio_vmx_copy_nums = 1;
|
||||||
|
emulated = kvmppc_handle_vmx_load(run, vcpu,
|
||||||
|
KVM_MMIO_REG_VMX|op.reg,
|
||||||
|
size, 1);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
#endif
|
||||||
|
#ifdef CONFIG_VSX
|
||||||
|
case LOAD_VSX: {
|
||||||
|
int io_size_each;
|
||||||
|
|
||||||
|
if (op.vsx_flags & VSX_CHECK_VEC) {
|
||||||
|
if (kvmppc_check_altivec_disabled(vcpu))
|
||||||
|
return EMULATE_DONE;
|
||||||
|
} else {
|
||||||
|
if (kvmppc_check_vsx_disabled(vcpu))
|
||||||
|
return EMULATE_DONE;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (op.vsx_flags & VSX_FPCONV)
|
||||||
|
vcpu->arch.mmio_sp64_extend = 1;
|
||||||
|
|
||||||
|
if (op.element_size == 8) {
|
||||||
|
if (op.vsx_flags & VSX_SPLAT)
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
|
||||||
|
else
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VSX_COPY_DWORD;
|
||||||
|
} else if (op.element_size == 4) {
|
||||||
|
if (op.vsx_flags & VSX_SPLAT)
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
|
||||||
|
else
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VSX_COPY_WORD;
|
||||||
|
} else
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case OP_31_XOP_LBZX:
|
if (size < op.element_size) {
|
||||||
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
|
/* precision convert case: lxsspx, etc */
|
||||||
break;
|
vcpu->arch.mmio_vsx_copy_nums = 1;
|
||||||
|
io_size_each = size;
|
||||||
|
} else { /* lxvw4x, lxvd2x, etc */
|
||||||
|
vcpu->arch.mmio_vsx_copy_nums =
|
||||||
|
size/op.element_size;
|
||||||
|
io_size_each = op.element_size;
|
||||||
|
}
|
||||||
|
|
||||||
case OP_31_XOP_LBZUX:
|
emulated = kvmppc_handle_vsx_load(run, vcpu,
|
||||||
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
|
KVM_MMIO_REG_VSX | (op.reg & 0x1f),
|
||||||
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
|
io_size_each, 1, op.type & SIGNEXT);
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
case STORE:
|
||||||
|
/* if need byte reverse, op.val has been reversed by
|
||||||
|
* analyse_instr().
|
||||||
|
*/
|
||||||
|
emulated = kvmppc_handle_store(run, vcpu, op.val,
|
||||||
|
size, 1);
|
||||||
|
|
||||||
|
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
|
||||||
|
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
|
||||||
|
|
||||||
|
break;
|
||||||
|
#ifdef CONFIG_PPC_FPU
|
||||||
|
case STORE_FP:
|
||||||
|
if (kvmppc_check_fp_disabled(vcpu))
|
||||||
|
return EMULATE_DONE;
|
||||||
|
|
||||||
|
/* The FP registers need to be flushed so that
|
||||||
|
* kvmppc_handle_store() can read actual FP vals
|
||||||
|
* from vcpu->arch.
|
||||||
|
*/
|
||||||
|
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
|
||||||
|
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
|
||||||
|
MSR_FP);
|
||||||
|
|
||||||
|
if (op.type & FPCONV)
|
||||||
|
vcpu->arch.mmio_sp64_extend = 1;
|
||||||
|
|
||||||
case OP_31_XOP_STDX:
|
|
||||||
emulated = kvmppc_handle_store(run, vcpu,
|
emulated = kvmppc_handle_store(run, vcpu,
|
||||||
kvmppc_get_gpr(vcpu, rs), 8, 1);
|
VCPU_FPR(vcpu, op.reg), size, 1);
|
||||||
|
|
||||||
|
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
|
||||||
|
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
|
||||||
|
|
||||||
|
break;
|
||||||
|
#endif
|
||||||
|
#ifdef CONFIG_ALTIVEC
|
||||||
|
case STORE_VMX:
|
||||||
|
if (kvmppc_check_altivec_disabled(vcpu))
|
||||||
|
return EMULATE_DONE;
|
||||||
|
|
||||||
|
/* Hardware enforces alignment of VMX accesses. */
|
||||||
|
vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
|
||||||
|
vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
|
||||||
|
|
||||||
|
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
|
||||||
|
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
|
||||||
|
MSR_VEC);
|
||||||
|
if (size == 16) { /* stvx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_DWORD;
|
||||||
|
} else if (size == 4) { /* stvewx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_WORD;
|
||||||
|
} else if (size == 2) { /* stvehx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_HWORD;
|
||||||
|
} else if (size == 1) { /* stvebx */
|
||||||
|
vcpu->arch.mmio_copy_type =
|
||||||
|
KVMPPC_VMX_COPY_BYTE;
|
||||||
|
} else
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case OP_31_XOP_STDUX:
|
vcpu->arch.mmio_vmx_offset =
|
||||||
emulated = kvmppc_handle_store(run, vcpu,
|
(vcpu->arch.vaddr_accessed & 0xf)/size;
|
||||||
kvmppc_get_gpr(vcpu, rs), 8, 1);
|
|
||||||
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
|
if (size == 16) {
|
||||||
|
vcpu->arch.mmio_vmx_copy_nums = 2;
|
||||||
|
emulated = kvmppc_handle_vmx_store(run,
|
||||||
|
vcpu, op.reg, 8, 1);
|
||||||
|
} else {
|
||||||
|
vcpu->arch.mmio_vmx_copy_nums = 1;
|
||||||
|
emulated = kvmppc_handle_vmx_store(run,
|
||||||
|
vcpu, op.reg, size, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
break;
|
||||||
|
#endif
|
||||||
|
+#endif
+#ifdef CONFIG_VSX
+	case STORE_VSX: {
+		int io_size_each;
+
+		if (op.vsx_flags & VSX_CHECK_VEC) {
+			if (kvmppc_check_altivec_disabled(vcpu))
+				return EMULATE_DONE;
+		} else {
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+		}
+
+		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
+					MSR_VSX);
+
+		if (op.vsx_flags & VSX_FPCONV)
+			vcpu->arch.mmio_sp64_extend = 1;
+
+		if (op.element_size == 8)
+			vcpu->arch.mmio_copy_type =
+					KVMPPC_VSX_COPY_DWORD;
+		else if (op.element_size == 4)
+			vcpu->arch.mmio_copy_type =
+					KVMPPC_VSX_COPY_WORD;
+		else
+			break;
+
-	case OP_31_XOP_STWX:
-		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs), 4, 1);
-		break;
+		if (size < op.element_size) {
+			/* precise conversion case, like stxsspx */
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			io_size_each = size;
+		} else { /* stxvw4x, stxvd2x, etc */
+			vcpu->arch.mmio_vsx_copy_nums =
+					size/op.element_size;
+			io_size_each = op.element_size;
+		}
+
-	case OP_31_XOP_STWUX:
-		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs), 4, 1);
-		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
-		break;
+		emulated = kvmppc_handle_vsx_store(run, vcpu,
+				op.reg & 0x1f, io_size_each, 1);
+
+		break;
+	}
+#endif
-	case OP_31_XOP_STBX:
-		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs), 1, 1);
-		break;
 [The remainder of the old hand-rolled opcode switch is removed in this hunk: the other OP_31_XOP_* indexed forms (STBUX, LHAX/LHAUX, LHZX/LHZUX, STHX/STHUX, the dcb* cache ops, the byte-reversed and 64-bit forms, and the FP/VSX/Altivec indexed loads and stores such as LFSX..STFIWX, LXSDX..LXVDSX, STXSDX..STXVW4X, LVX/STVX) and the D-form OP_* cases (LWZ/LWZU, LBZ/LBZU, STW/STWU, STB/STBU, LHZ/LHZU, LHA/LHAU, STH/STHU, LD/STD, LFS/LFSU/LFD/LFDU, STFS/STFSU/STFD/STFDU), each of which called kvmppc_handle_load()/kvmppc_handle_loads()/kvmppc_handle_store() with a hard-coded size. Only the condensed handling below survives.]
+	case CACHEOP:
 		/* Do nothing. The guest is performing dcbi because
 		 * hardware DMA is not snooped by the dcache, but
 		 * emulated DMA either goes through the dcache as
 		 * normal writes, or the host kernel has handled dcache
-		 * coherence. */
+		 * coherence.
+		 */
+		emulated = EMULATE_DONE;
 		break;
+
 	default:
 		emulated = EMULATE_FAIL;
 		break;
 	}
 	}

 	if (emulated == EMULATE_FAIL) {
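The STORE_VMX and STORE_VSX cases above replace per-opcode handlers with size/offset bookkeeping driven by the decoded instruction. As an illustration of the arithmetic only, here is a stand-alone sketch of how a vector store's guest address becomes an element offset and a copy count; the struct and function below are invented for this sketch, only the arithmetic mirrors the kernel code:

```c
#include <stdio.h>

/* Illustrative mirror of the STORE_VMX bookkeeping (not kernel types). */
struct vmx_mmio_plan {
	unsigned long vaddr;	/* access address after alignment       */
	unsigned int copy_size;	/* bytes moved per MMIO copy            */
	unsigned int copy_nums;	/* how many copies are queued           */
	unsigned int offset;	/* element index within the 16-byte VR  */
};

static struct vmx_mmio_plan plan_vmx_store(unsigned long ea, unsigned int size)
{
	struct vmx_mmio_plan p;

	p.vaddr = ea & ~((unsigned long)size - 1);	/* hardware-style alignment */
	p.offset = (p.vaddr & 0xf) / size;		/* which element is touched */
	p.copy_nums = (size == 16) ? 2 : 1;		/* stvx goes out as 2 x 8B  */
	p.copy_size = (size == 16) ? 8 : size;
	return p;
}

int main(void)
{
	struct vmx_mmio_plan stvx = plan_vmx_store(0x1008, 16);
	struct vmx_mmio_plan stvewx = plan_vmx_store(0x1004, 4);

	printf("stvx:   %u copies of %u bytes, offset %u\n",
	       stvx.copy_nums, stvx.copy_size, stvx.offset);
	printf("stvewx: %u copies of %u bytes, offset %u\n",
	       stvewx.copy_nums, stvewx.copy_size, stvewx.offset);
	return 0;
}
```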
@@ -648,9 +648,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	case KVM_CAP_PPC_HTM:
-		r = hv_enabled &&
-		    (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
-		     cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
+		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
+		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
 		break;
 #endif
 	default:
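With the reworked KVM_CAP_PPC_HTM test above, plain userspace HTM support is enough to advertise the capability; the HV requirement only remains for the POWER9 TM-assist path. A stand-alone truth-table sketch of that boolean, with the kernel feature tests reduced to plain flags for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: the three inputs of the new capability expression. */
static bool cap_ppc_htm(bool cpu_user_htm, bool hv_enabled, bool p9_tm_hv_assist)
{
	return cpu_user_htm || (hv_enabled && p9_tm_hv_assist);
}

int main(void)
{
	/* PR KVM on a TM-capable CPU: no HV host needed any more. */
	printf("%d\n", cap_ppc_htm(true, false, false));	/* 1 */
	/* POWER9 HV host relying on the TM HV-assist workaround. */
	printf("%d\n", cap_ppc_htm(false, true, true));		/* 1 */
	/* Neither: the capability stays off. */
	printf("%d\n", cap_ppc_htm(false, false, true));	/* 0 */
	return 0;
}
```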
@@ -907,6 +906,26 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 	}
 }
 
+static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
+	u32 gpr)
+{
+	union kvmppc_one_reg val;
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		val.vsx32val[0] = gpr;
+		val.vsx32val[1] = gpr;
+		val.vsx32val[2] = gpr;
+		val.vsx32val[3] = gpr;
+		VCPU_VSX_VR(vcpu, index) = val.vval;
+	} else {
+		val.vsx32val[0] = gpr;
+		val.vsx32val[1] = gpr;
+		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
+		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+	}
+}
+
 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 	u32 gpr32)
 {
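kvmppc_set_vsr_word_dump() above duplicates one 32-bit MMIO result into every word (or both doublewords) of the destination register. A tiny stand-alone illustration of the splat itself, using a plain array rather than the kernel's register types:

```c
#include <stdint.h>
#include <stdio.h>

/* Splat one 32-bit value across a 128-bit register image, as the helper does. */
static void splat_word(uint32_t words[4], uint32_t gpr)
{
	for (int i = 0; i < 4; i++)
		words[i] = gpr;
}

int main(void)
{
	uint32_t vr[4] = { 0 };

	splat_word(vr, 0xdeadbeef);
	printf("%08x %08x %08x %08x\n", vr[0], vr[1], vr[2], vr[3]);
	return 0;
}
```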
@@ -933,30 +952,110 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_ALTIVEC
+static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
+		int index, int element_size)
+{
+	int offset;
+	int elts = sizeof(vector128)/element_size;
+
+	if ((index < 0) || (index >= elts))
+		return -1;
+
+	if (kvmppc_need_byteswap(vcpu))
+		offset = elts - index - 1;
+	else
+		offset = index;
+
+	return offset;
+}
+
+static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
+		int index)
+{
+	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
+}
+
+static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
+		int index)
+{
+	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
+}
+
+static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
+		int index)
+{
+	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
+}
+
+static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
+		int index)
+{
+	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
+}
+
+
 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
 	u64 gpr)
 {
+	union kvmppc_one_reg val;
+	int offset = kvmppc_get_vmx_dword_offset(vcpu,
+			vcpu->arch.mmio_vmx_offset);
 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
-	u32 hi, lo;
-	u32 di;
 
-#ifdef __BIG_ENDIAN
-	hi = gpr >> 32;
-	lo = gpr & 0xffffffff;
-#else
-	lo = gpr >> 32;
-	hi = gpr & 0xffffffff;
-#endif
-
-	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
-	if (di > 1)
+	if (offset == -1)
 		return;
 
-	if (vcpu->arch.mmio_host_swabbed)
-		di = 1 - di;
-
-	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
-	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+	val.vval = VCPU_VSX_VR(vcpu, index);
+	val.vsxval[offset] = gpr;
+	VCPU_VSX_VR(vcpu, index) = val.vval;
+}
+
+static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
+	u32 gpr32)
+{
+	union kvmppc_one_reg val;
+	int offset = kvmppc_get_vmx_word_offset(vcpu,
+			vcpu->arch.mmio_vmx_offset);
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+	if (offset == -1)
+		return;
+
+	val.vval = VCPU_VSX_VR(vcpu, index);
+	val.vsx32val[offset] = gpr32;
+	VCPU_VSX_VR(vcpu, index) = val.vval;
+}
+
+static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
+	u16 gpr16)
+{
+	union kvmppc_one_reg val;
+	int offset = kvmppc_get_vmx_hword_offset(vcpu,
+			vcpu->arch.mmio_vmx_offset);
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+	if (offset == -1)
+		return;
+
+	val.vval = VCPU_VSX_VR(vcpu, index);
+	val.vsx16val[offset] = gpr16;
+	VCPU_VSX_VR(vcpu, index) = val.vval;
+}
+
+static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
+	u8 gpr8)
+{
+	union kvmppc_one_reg val;
+	int offset = kvmppc_get_vmx_byte_offset(vcpu,
+			vcpu->arch.mmio_vmx_offset);
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+	if (offset == -1)
+		return;
+
+	val.vval = VCPU_VSX_VR(vcpu, index);
+	val.vsx8val[offset] = gpr8;
+	VCPU_VSX_VR(vcpu, index) = val.vval;
 }
 #endif /* CONFIG_ALTIVEC */
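The kvmppc_get_vmx_*_offset() helpers above map a guest element index into the host's vector layout, mirroring the index within the 16-byte register when guest and host endianness differ. A stand-alone restatement of that arithmetic with a couple of worked values; the function name and hard-coded 16 below are for illustration, only the logic mirrors the helpers:

```c
#include <stdio.h>

/* 16-byte vector register, elts = 16 / element_size, index mirrored on swap. */
static int vmx_offset(int index, int element_size, int need_byteswap)
{
	int elts = 16 / element_size;

	if (index < 0 || index >= elts)
		return -1;
	return need_byteswap ? elts - index - 1 : index;
}

int main(void)
{
	/* Word (4-byte) element 1: same endianness keeps index 1 ...        */
	printf("%d\n", vmx_offset(1, 4, 0));	/* 1 */
	/* ... while a byteswapped guest sees it mirrored to element 2.      */
	printf("%d\n", vmx_offset(1, 4, 1));	/* 2 */
	/* Out-of-range indexes are rejected, like in the kernel helper.     */
	printf("%d\n", vmx_offset(4, 4, 0));	/* -1 */
	return 0;
}
```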
@@ -1041,6 +1140,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 		break;
 	case KVM_MMIO_REG_FPR:
+		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
+
 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		break;
 #ifdef CONFIG_PPC_BOOK3S
@@ -1054,18 +1156,36 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 #endif
 #ifdef CONFIG_VSX
 	case KVM_MMIO_REG_VSX:
-		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
+
+		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
 			kvmppc_set_vsr_dword(vcpu, gpr);
-		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
 			kvmppc_set_vsr_word(vcpu, gpr);
-		else if (vcpu->arch.mmio_vsx_copy_type ==
+		else if (vcpu->arch.mmio_copy_type ==
 				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
+		else if (vcpu->arch.mmio_copy_type ==
+				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
+			kvmppc_set_vsr_word_dump(vcpu, gpr);
 		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case KVM_MMIO_REG_VMX:
+		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
+
+		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
-		kvmppc_set_vmx_dword(vcpu, gpr);
+			kvmppc_set_vmx_dword(vcpu, gpr);
+		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
+			kvmppc_set_vmx_word(vcpu, gpr);
+		else if (vcpu->arch.mmio_copy_type ==
+				KVMPPC_VMX_COPY_HWORD)
+			kvmppc_set_vmx_hword(vcpu, gpr);
+		else if (vcpu->arch.mmio_copy_type ==
+				KVMPPC_VMX_COPY_BYTE)
+			kvmppc_set_vmx_byte(vcpu, gpr);
 		break;
 #endif
 	default:
@@ -1228,7 +1348,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 	u32 dword_offset, word_offset;
 	union kvmppc_one_reg reg;
 	int vsx_offset = 0;
-	int copy_type = vcpu->arch.mmio_vsx_copy_type;
+	int copy_type = vcpu->arch.mmio_copy_type;
 	int result = 0;
 
 	switch (copy_type) {
@@ -1344,14 +1464,16 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_ALTIVEC
-/* handle quadword load access in two halves */
-int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		unsigned int rt, int is_default_endian)
+int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, unsigned int bytes, int is_default_endian)
 {
 	enum emulation_result emulated = EMULATE_DONE;
 
+	if (vcpu->arch.mmio_vsx_copy_nums > 2)
+		return EMULATE_FAIL;
+
 	while (vcpu->arch.mmio_vmx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
 				is_default_endian, 0);
 
 		if (emulated != EMULATE_DONE)
@@ -1359,55 +1481,127 @@ int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		vcpu->arch.paddr_accessed += run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
+		vcpu->arch.mmio_vmx_offset++;
 	}
 
 	return emulated;
 }
 
-static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
 {
-	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
-	u32 di;
-	u64 w0, w1;
+	union kvmppc_one_reg reg;
+	int vmx_offset = 0;
+	int result = 0;
 
-	di = 2 - vcpu->arch.mmio_vmx_copy_nums;	/* doubleword index */
-	if (di > 1)
+	vmx_offset =
+		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+	if (vmx_offset == -1)
 		return -1;
 
-	if (vcpu->arch.mmio_host_swabbed)
-		di = 1 - di;
+	reg.vval = VCPU_VSX_VR(vcpu, index);
+	*val = reg.vsxval[vmx_offset];
 
-	w0 = vrs.u[di * 2];
-	w1 = vrs.u[di * 2 + 1];
+	return result;
+}
 
-#ifdef __BIG_ENDIAN
-	*val = (w0 << 32) | w1;
-#else
-	*val = (w1 << 32) | w0;
-#endif
-	return 0;
+int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+{
+	union kvmppc_one_reg reg;
+	int vmx_offset = 0;
+	int result = 0;
+
+	vmx_offset =
+		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+	if (vmx_offset == -1)
+		return -1;
+
+	reg.vval = VCPU_VSX_VR(vcpu, index);
+	*val = reg.vsx32val[vmx_offset];
+
+	return result;
+}
+
+int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+{
+	union kvmppc_one_reg reg;
+	int vmx_offset = 0;
+	int result = 0;
+
+	vmx_offset =
+		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+	if (vmx_offset == -1)
+		return -1;
+
+	reg.vval = VCPU_VSX_VR(vcpu, index);
+	*val = reg.vsx16val[vmx_offset];
+
+	return result;
+}
+
+int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+{
+	union kvmppc_one_reg reg;
+	int vmx_offset = 0;
+	int result = 0;
+
+	vmx_offset =
+		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+	if (vmx_offset == -1)
+		return -1;
+
+	reg.vval = VCPU_VSX_VR(vcpu, index);
+	*val = reg.vsx8val[vmx_offset];
+
+	return result;
 }
 
-/* handle quadword store in two halves */
-int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		unsigned int rs, int is_default_endian)
+int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val = 0;
+	unsigned int index = rs & KVM_MMIO_REG_MASK;
 	enum emulation_result emulated = EMULATE_DONE;
 
+	if (vcpu->arch.mmio_vsx_copy_nums > 2)
+		return EMULATE_FAIL;
+
 	vcpu->arch.io_gpr = rs;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
-		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+		switch (vcpu->arch.mmio_copy_type) {
+		case KVMPPC_VMX_COPY_DWORD:
+			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
+				return EMULATE_FAIL;
+
+			break;
+		case KVMPPC_VMX_COPY_WORD:
+			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
+				return EMULATE_FAIL;
+			break;
+		case KVMPPC_VMX_COPY_HWORD:
+			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
+				return EMULATE_FAIL;
+			break;
+		case KVMPPC_VMX_COPY_BYTE:
+			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
+				return EMULATE_FAIL;
+			break;
+		default:
 			return EMULATE_FAIL;
+		}
 
-		emulated = kvmppc_handle_store(run, vcpu, val, 8,
+		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
 				is_default_endian);
 		if (emulated != EMULATE_DONE)
 			break;
 
 		vcpu->arch.paddr_accessed += run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
+		vcpu->arch.mmio_vmx_offset++;
 	}
 
 	return emulated;
@@ -1422,11 +1616,11 @@ static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
 	vcpu->arch.paddr_accessed += run->mmio.len;
 
 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
-				vcpu->arch.io_gpr, 1);
+		emulated = kvmppc_handle_vmx_load(run, vcpu,
+				vcpu->arch.io_gpr, run->mmio.len, 1);
 	} else {
-		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
-				vcpu->arch.io_gpr, 1);
+		emulated = kvmppc_handle_vmx_store(run, vcpu,
+				vcpu->arch.io_gpr, run->mmio.len, 1);
 	}
 
 	switch (emulated) {
@@ -1570,8 +1764,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 #endif
 #ifdef CONFIG_ALTIVEC
-	if (vcpu->arch.mmio_vmx_copy_nums > 0)
+	if (vcpu->arch.mmio_vmx_copy_nums > 0) {
 		vcpu->arch.mmio_vmx_copy_nums--;
+		vcpu->arch.mmio_vmx_offset++;
+	}
 
 	if (vcpu->arch.mmio_vmx_copy_nums > 0) {
 		r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
@@ -1784,16 +1980,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	void __user *argp = (void __user *)arg;
 	long r;
 
-	vcpu_load(vcpu);
-
 	switch (ioctl) {
 	case KVM_ENABLE_CAP:
 	{
 		struct kvm_enable_cap cap;
 		r = -EFAULT;
+		vcpu_load(vcpu);
 		if (copy_from_user(&cap, argp, sizeof(cap)))
 			goto out;
 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+		vcpu_put(vcpu);
 		break;
 	}
 
@@ -1815,9 +2011,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	case KVM_DIRTY_TLB: {
 		struct kvm_dirty_tlb dirty;
 		r = -EFAULT;
+		vcpu_load(vcpu);
 		if (copy_from_user(&dirty, argp, sizeof(dirty)))
 			goto out;
 		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
+		vcpu_put(vcpu);
 		break;
 	}
 #endif
@@ -1826,7 +2024,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 
 out:
-	vcpu_put(vcpu);
 	return r;
 }
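The ioctl change above swaps one function-wide vcpu_load()/vcpu_put() pair for a pair inside each case that actually needs the vCPU loaded, so cheap ioctls no longer pay that cost. A minimal sketch of the pattern outside the kernel; every name below is a stand-in, with lock()/unlock() playing the role of vcpu_load()/vcpu_put():

```c
#include <errno.h>

/* Illustrative stand-ins for the real vCPU, ioctls and helpers. */
struct vcpu { int loaded; };

enum { IOCTL_NEEDS_VCPU = 1, IOCTL_CHEAP = 2 };

static void lock(struct vcpu *v)   { v->loaded = 1; }
static void unlock(struct vcpu *v) { v->loaded = 0; }
static long do_vcpu_work(struct vcpu *v, void *argp) { (void)v; (void)argp; return 0; }
static long do_cheap_work(void *argp)                { (void)argp; return 0; }

static long vcpu_ioctl(struct vcpu *v, unsigned int ioctl, void *argp)
{
	long r = -EINVAL;

	switch (ioctl) {
	case IOCTL_NEEDS_VCPU:
		lock(v);			/* was done once for every ioctl before */
		r = do_vcpu_work(v, argp);
		unlock(v);			/* dropped before leaving the case */
		break;
	case IOCTL_CHEAP:
		r = do_cheap_work(argp);	/* never needs the vCPU loaded */
		break;
	}
	return r;
}

int main(void)
{
	struct vcpu v = { 0 };

	return (int)vcpu_ioctl(&v, IOCTL_CHEAP, 0);
}
```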
@@ -0,0 +1,384 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Derived from book3s_hv_rmhandlers.S, which is:
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 */

#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/*
 * Save transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct
 * - r4 points to the MSR with current TS bits:
 *	(For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
 * This can modify all checkpointed registers, but
 * restores r1, r2 before exit.
 */
_GLOBAL(__kvmppc_save_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	ori	r8, r8, MSR_FP
	oris	r8, r8, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r8

	rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_SCRATCH2(r13)
	std	r3, HSTATE_SCRATCH1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	li	r3, TM_CAUSE_KVM_RESCHED

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_SCRATCH1(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
#endif

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_SCRATCH2(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpinted SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR. Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
11:
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can
 * be invoked from C function by PR KVM only.
 */
_GLOBAL(_kvmppc_save_tm_pr)
	mflr	r5
	std	r5, PPC_LR_STKOFF(r1)
	stdu	r1, -SWITCH_FRAME_SIZE(r1)
	SAVE_NVGPRS(r1)

	/* save MSR since TM/math bits might be impacted
	 * by __kvmppc_save_tm().
	 */
	mfmsr	r5
	SAVE_GPR(5, r1)

	/* also save DSCR/CR/TAR so that it can be recovered later */
	mfspr	r6, SPRN_DSCR
	SAVE_GPR(6, r1)

	mfcr	r7
	stw	r7, _CCR(r1)

	mfspr	r8, SPRN_TAR
	SAVE_GPR(8, r1)

	bl	__kvmppc_save_tm

	REST_GPR(8, r1)
	mtspr	SPRN_TAR, r8

	ld	r7, _CCR(r1)
	mtcr	r7

	REST_GPR(6, r1)
	mtspr	SPRN_DSCR, r6

	/* need preserve current MSR's MSR_TS bits */
	REST_GPR(5, r1)
	mfmsr	r6
	rldicl	r6, r6, 64 - MSR_TS_S_LG, 62
	rldimi	r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtmsrd	r5

	REST_NVGPRS(r1)
	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r5, PPC_LR_STKOFF(r1)
	mtlr	r5
	blr

EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);

/*
 * Restore transactional state and TM-related registers.
 * Called with:
 *  - r3 pointing to the vcpu struct.
 *  - r4 is the guest MSR with desired TS bits:
 *	For HV KVM, it is VCPU_MSR
 *	For PR KVM, it is provided by caller
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2 from the PACA.
 */
_GLOBAL(__kvmppc_restore_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	mr	r5, r4
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_SCRATCH2(r13)

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt. It's possible that this might have been not set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r3
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r3, r31
	lwz	r7, VCPU_VRSAVE_TM(r3)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r3)
	lwz	r6, VCPU_CR_TM(r3)
	ld	r7, VCPU_CTR_TM(r3)
	ld	r8, VCPU_AMR_TM(r3)
	ld	r9, VCPU_TAR_TM(r3)
	ld	r10, VCPU_XER_TM(r3)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r3)
	ld	r30, VCPU_PPR_TM(r3)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup. All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
#endif
	ld	r1, HSTATE_SCRATCH2(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it
 * can be invoked from C function by PR KVM only.
 */
_GLOBAL(_kvmppc_restore_tm_pr)
	mflr	r5
	std	r5, PPC_LR_STKOFF(r1)
	stdu	r1, -SWITCH_FRAME_SIZE(r1)
	SAVE_NVGPRS(r1)

	/* save MSR to avoid TM/math bits change */
	mfmsr	r5
	SAVE_GPR(5, r1)

	/* also save DSCR/CR/TAR so that it can be recovered later */
	mfspr	r6, SPRN_DSCR
	SAVE_GPR(6, r1)

	mfcr	r7
	stw	r7, _CCR(r1)

	mfspr	r8, SPRN_TAR
	SAVE_GPR(8, r1)

	bl	__kvmppc_restore_tm

	REST_GPR(8, r1)
	mtspr	SPRN_TAR, r8

	ld	r7, _CCR(r1)
	mtcr	r7

	REST_GPR(6, r1)
	mtspr	SPRN_DSCR, r6

	/* need preserve current MSR's MSR_TS bits */
	REST_GPR(5, r1)
	mfmsr	r6
	rldicl	r6, r6, 64 - MSR_TS_S_LG, 62
	rldimi	r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtmsrd	r5

	REST_NVGPRS(r1)
	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r5, PPC_LR_STKOFF(r1)
	mtlr	r5
	blr

EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -4429,16 +4429,14 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 			goto out_vmcs;
 		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
 
-#if IS_ENABLED(CONFIG_HYPERV)
-		if (static_branch_unlikely(&enable_evmcs) &&
+		if (IS_ENABLED(CONFIG_HYPERV) &&
+		    static_branch_unlikely(&enable_evmcs) &&
 		    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
 			struct hv_enlightened_vmcs *evmcs =
 				(struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
 
 			evmcs->hv_enlightenments_control.msr_bitmap = 1;
 		}
-#endif
 	}
 	return 0;
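The hyperv change above moves the CONFIG_HYPERV test from an #if block into the if-condition via IS_ENABLED(), so the code is always parsed and type-checked but still compiled out as dead code when the option is off. A generic sketch of the same idiom outside the kernel; CONFIG_FOO and the helper below are invented, and the macro is only a stand-in for the kernel's IS_ENABLED():

```c
#include <stdio.h>

/* Stand-in for IS_ENABLED(): 1 if the build option is set, else 0. */
#ifdef CONFIG_FOO
#define CONFIG_FOO_ENABLED 1
#else
#define CONFIG_FOO_ENABLED 0
#endif

static int foo_setup(void) { return 42; }

int main(void)
{
	int r = 0;

	/* Always compiled and checked; optimized away when the option is off. */
	if (CONFIG_FOO_ENABLED && foo_setup() > 0)
		r = 1;

	printf("%d\n", r);
	return 0;
}
```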
@@ -8567,7 +8567,7 @@ int kvm_arch_hardware_setup(void)
 	/*
 	 * Make sure the user can only configure tsc_khz values that
 	 * fit into a signed integer.
-	 * A min value is not calculated needed because it will always
+	 * A min value is not calculated because it will always
 	 * be 1 on all machines.
 	 */
 	u64 max = min(0x7fffffffULL,