KVM: VMX: Support for injecting software exceptions

VMX differentiates between processor-generated and software-generated
exceptions when injecting them into the guest. Extend vmx_queue_exception
accordingly (and refactor the related constants) so that this service can
be used reliably by the new guest debugging framework.
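
For context, the VM-entry interruption-information field that
vmx_queue_exception programs encodes the vector in bits 7:0, the event
type in bits 10:8, the deliver-error-code flag in bit 11, and the valid
flag in bit 31 (per the Intel SDM); "software exception" (type 6) covers
#BP and #OF as raised by INT3/INTO, which additionally require
VM_ENTRY_INSTRUCTION_LEN to be programmed. A minimal sketch of the
encoding using the constants touched by this patch (build_intr_info is a
hypothetical helper, for illustration only):

/*
 * Hypothetical helper, for illustration only: compose the VM-entry
 * interruption-information word for exception vector nr.
 */
static u32 build_intr_info(unsigned nr, bool has_error_code, bool is_soft)
{
	u32 intr_info = nr | INTR_INFO_VALID_MASK;	/* bits 7:0, bit 31 */

	if (has_error_code)
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;	/* bit 11 */

	/*
	 * Bits 10:8 select the event type. #BP/#OF raised by INT3/INTO
	 * are software exceptions; the CPU also needs the instruction
	 * length so the return RIP pushed for the guest points past the
	 * trapping instruction.
	 */
	intr_info |= is_soft ? INTR_TYPE_SOFT_EXCEPTION
			     : INTR_TYPE_HARD_EXCEPTION;
	return intr_info;
}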

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 8ab2d2e231
parent d80174745b
Author:    Jan Kiszka <jan.kiszka@siemens.com>
Date:      2008-12-15 13:52:10 +01:00
Committer: Avi Kivity <avi@redhat.com>

 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -270,8 +270,9 @@ enum vmcs_field {
 #define INTR_TYPE_EXT_INTR		(0 << 8)	/* external interrupt */
 #define INTR_TYPE_NMI_INTR		(2 << 8)	/* NMI */
-#define INTR_TYPE_EXCEPTION		(3 << 8)	/* processor exception */
+#define INTR_TYPE_HARD_EXCEPTION	(3 << 8)	/* processor exception */
 #define INTR_TYPE_SOFT_INTR		(4 << 8)	/* software interrupt */
+#define INTR_TYPE_SOFT_EXCEPTION	(6 << 8)	/* software exception */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI		0x00000001

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -189,21 +189,21 @@ static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
+		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
 static inline int is_no_device(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
+		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
 static inline int is_invalid_opcode(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
 			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
 static inline int is_external_interrupt(u32 intr_info)
@@ -747,29 +747,33 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 intr_info = nr | INTR_INFO_VALID_MASK;
 
-	if (has_error_code)
+	if (has_error_code) {
 		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+	}
 
 	if (vcpu->arch.rmode.active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = nr;
 		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
-		if (nr == BP_VECTOR)
+		if (nr == BP_VECTOR || nr == OF_VECTOR)
 			vmx->rmode.irq.rip++;
-		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-			     nr | INTR_TYPE_SOFT_INTR
-			     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
-			     | INTR_INFO_VALID_MASK);
+		intr_info |= INTR_TYPE_SOFT_INTR;
+		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
 		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
 		return;
 	}
 
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     nr | INTR_TYPE_EXCEPTION
-		     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
-		     | INTR_INFO_VALID_MASK);
+	if (nr == BP_VECTOR || nr == OF_VECTOR) {
+		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
+		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
+	} else
+		intr_info |= INTR_TYPE_HARD_EXCEPTION;
+
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
 }
 
 static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
@@ -2650,7 +2654,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
-	    (INTR_TYPE_EXCEPTION | 1)) {
+	    (INTR_TYPE_HARD_EXCEPTION | 1)) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		return 0;
 	}
@@ -3238,7 +3242,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 		vmx->vcpu.arch.nmi_injected = false;
 	}
 	kvm_clear_exception_queue(&vmx->vcpu);
-	if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) {
+	if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION ||
+				type == INTR_TYPE_SOFT_EXCEPTION)) {
 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
 			error = vmcs_read32(IDT_VECTORING_ERROR_CODE);
 			kvm_queue_exception_e(&vmx->vcpu, vector, error);
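
With vmx_queue_exception handling the type distinction itself, guest
debugging code can route breakpoints through the generic exception queue
and rely on VMX picking the right injection type. A hypothetical caller
(not part of this patch) would simply do:

	/*
	 * Hypothetical usage: reflect a breakpoint into the guest. The
	 * queued #BP reaches vmx_queue_exception(), which now injects it
	 * as INTR_TYPE_SOFT_EXCEPTION with VM_ENTRY_INSTRUCTION_LEN = 1
	 * (the length of INT3).
	 */
	kvm_queue_exception(vcpu, BP_VECTOR);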