Merge remote-tracking branch 'qemu-kvm/uq/master' into staging

* qemu-kvm/uq/master:
  qemu-kvm/pci-assign: 64 bits bar emulation
  target-i386: Enabling IA32_TSC_ADJUST for QEMU KVM guest VMs

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
This commit is contained in:
Anthony Liguori 2013-01-02 08:01:54 -06:00
commit 34daffa048
4 changed files with 47 additions and 4 deletions

View File

@@ -46,6 +46,7 @@
 #define IORESOURCE_IRQ 0x00000400
 #define IORESOURCE_DMA 0x00000800
 #define IORESOURCE_PREFETCH 0x00002000 /* No side effects */
+#define IORESOURCE_MEM_64 0x00100000

 //#define DEVICE_ASSIGNMENT_DEBUG
@@ -442,9 +443,13 @@ static int assigned_dev_register_regions(PCIRegion *io_regions,
         /* handle memory io regions */
         if (cur_region->type & IORESOURCE_MEM) {
-            int t = cur_region->type & IORESOURCE_PREFETCH
-                ? PCI_BASE_ADDRESS_MEM_PREFETCH
-                : PCI_BASE_ADDRESS_SPACE_MEMORY;
+            int t = PCI_BASE_ADDRESS_SPACE_MEMORY;
+            if (cur_region->type & IORESOURCE_PREFETCH) {
+                t |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+            }
+            if (cur_region->type & IORESOURCE_MEM_64) {
+                t |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+            }

             /* map physical memory */
             pci_dev->v_addrs[i].u.r_virtbase = mmap(NULL, cur_region->size,
@@ -632,7 +637,8 @@ again:
         rp->valid = 0;
         rp->resource_fd = -1;
         size = end - start + 1;
-        flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+        flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH
+                 | IORESOURCE_MEM_64;
         if (size == 0 || (flags & ~IORESOURCE_PREFETCH) == 0) {
             continue;
         }

View File

@@ -295,6 +295,7 @@
 #define MSR_IA32_APICBASE_BSP (1<<8)
 #define MSR_IA32_APICBASE_ENABLE (1<<11)
 #define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define MSR_TSC_ADJUST 0x0000003b
 #define MSR_IA32_TSCDEADLINE 0x6e0

 #define MSR_MTRRcap 0xfe
@@ -774,6 +775,7 @@ typedef struct CPUX86State {
     uint64_t pv_eoi_en_msr;

     uint64_t tsc;
+    uint64_t tsc_adjust;
     uint64_t tsc_deadline;

     uint64_t mcg_status;

View File

@@ -63,6 +63,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 static bool has_msr_star;
 static bool has_msr_hsave_pa;
+static bool has_msr_tsc_adjust;
 static bool has_msr_tsc_deadline;
 static bool has_msr_async_pf_en;
 static bool has_msr_pv_eoi_en;
@@ -683,6 +684,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_hsave_pa = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
+                    has_msr_tsc_adjust = true;
+                    continue;
+                }
                 if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                     has_msr_tsc_deadline = true;
                     continue;
@@ -1026,6 +1031,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_hsave_pa) {
         kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
     }
+    if (has_msr_tsc_adjust) {
+        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
+    }
     if (has_msr_tsc_deadline) {
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
     }
@@ -1291,6 +1299,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hsave_pa) {
         msrs[n++].index = MSR_VM_HSAVE_PA;
     }
+    if (has_msr_tsc_adjust) {
+        msrs[n++].index = MSR_TSC_ADJUST;
+    }
     if (has_msr_tsc_deadline) {
         msrs[n++].index = MSR_IA32_TSCDEADLINE;
     }
@@ -1368,6 +1379,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
+        case MSR_TSC_ADJUST:
+            env->tsc_adjust = msrs[i].data;
+            break;
         case MSR_IA32_TSCDEADLINE:
             env->tsc_deadline = msrs[i].data;
             break;

View File

@@ -328,6 +328,24 @@ static const VMStateDescription vmstate_fpop_ip_dp = {
     }
 };

+static bool tsc_adjust_needed(void *opaque)
+{
+    CPUX86State *env = opaque;
+
+    return env->tsc_adjust != 0;
+}
+
+static const VMStateDescription vmstate_msr_tsc_adjust = {
+    .name = "cpu/msr_tsc_adjust",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(tsc_adjust, CPUX86State),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool tscdeadline_needed(void *opaque)
 {
     CPUX86State *env = opaque;
@@ -477,6 +495,9 @@ static const VMStateDescription vmstate_cpu = {
         } , {
             .vmsd = &vmstate_fpop_ip_dp,
             .needed = fpop_ip_dp_needed,
+        }, {
+            .vmsd = &vmstate_msr_tsc_adjust,
+            .needed = tsc_adjust_needed,
         }, {
             .vmsd = &vmstate_msr_tscdeadline,
             .needed = tscdeadline_needed,