Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc,pci,vhost,net fixes, enhancements

Don's patches to limit below-4g ram for pc
Marcel's pcie hotplug rewrite
Gabriel's changes to e1000 auto-negotiation
qemu char bugfixes by Stefan
misc bugfixes

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Mon 23 Jun 2014 16:25:19 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream: (23 commits)
  xen-hvm: Handle machine opt max-ram-below-4g
  pc & q35: Add new machine opt max-ram-below-4g
  xen-hvm: Fix xen_hvm_init() to adjust pc memory layout
  pcie: coding style tweak
  hw/pcie: better hotplug/hotunplug support
  hw/pcie: implement power controller functionality
  hw/pcie: correct debug message
  q35: Use PC_Q35_COMPAT_1_4 on pc-q35-1.4 compat_props
  virtio-pci: Report an error when msix vectors init fails
  qemu-char: avoid leaking unused fds in tcp_get_msgfds()
  qemu-char: fix qemu_chr_fe_get_msgfd()
  qapi/string-output-visitor: fix human output
  e1000: factor out checking for auto-negotiation availability
  e1000: move e1000_autoneg_timer() to after set_ics()
  e1000: signal guest on successful link auto-negotiation
  e1000: improve auto-negotiation reporting via mii-tool
  e1000: emulate auto-negotiation during external link status change
  qtest: fix vhost-user-test unbalanced mutex locks
  qtest: fix qtest for vhost-user
  libqemustub: add more stubs for qemu-char
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 27acb9dd24 (Peter Maydell, 2014-06-24 11:14:47 +01:00)
28 changed files with 474 additions and 126 deletions
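As a usage sketch for the headline max-ram-below-4g change (the sizes here are illustrative, not taken from this series): the machine option registered in the vl.c hunk below is passed as, e.g., '-machine pc,max-ram-below-4g=2G -m 6G', which caps guest RAM below the 4G boundary at 2G and moves the remaining 4G above it.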

hmp.c

@ -1692,7 +1692,7 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict)
ov = string_output_visitor_new(false);
visit_type_uint16List(string_output_get_visitor(ov),
&m->value->host_nodes, NULL, NULL);
monitor_printf(mon, "memory device %d\n", i);
monitor_printf(mon, "memory backend: %d\n", i);
monitor_printf(mon, " size: %" PRId64 "\n", m->value->size);
monitor_printf(mon, " merge: %s\n",
m->value->merge ? "true" : "false");


@ -1643,11 +1643,58 @@ pc_machine_get_hotplug_memory_region_size(Object *obj, Visitor *v, void *opaque,
visit_type_int(v, &value, name, errp);
}
static void pc_machine_get_max_ram_below_4g(Object *obj, Visitor *v,
void *opaque, const char *name,
Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
uint64_t value = pcms->max_ram_below_4g;
visit_type_size(v, &value, name, errp);
}
static void pc_machine_set_max_ram_below_4g(Object *obj, Visitor *v,
void *opaque, const char *name,
Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
Error *error = NULL;
uint64_t value;
visit_type_size(v, &value, name, &error);
if (error) {
error_propagate(errp, error);
return;
}
if (value > (1ULL << 32)) {
error_set(&error, ERROR_CLASS_GENERIC_ERROR,
"Machine option 'max-ram-below-4g=%"PRIu64
"' expects size less than or equal to 4G", value);
error_propagate(errp, error);
return;
}
if (value < (1ULL << 20)) {
error_report("Warning: small max_ram_below_4g(%"PRIu64
") less than 1M. BIOS may not work..",
value);
}
pcms->max_ram_below_4g = value;
}
static void pc_machine_initfn(Object *obj)
{
PCMachineState *pcms = PC_MACHINE(obj);
object_property_add(obj, PC_MACHINE_MEMHP_REGION_SIZE, "int",
pc_machine_get_hotplug_memory_region_size,
NULL, NULL, NULL, NULL);
pcms->max_ram_below_4g = 1ULL << 32; /* 4G */
object_property_add(obj, PC_MACHINE_MAX_RAM_BELOW_4G, "size",
pc_machine_get_max_ram_below_4g,
pc_machine_set_max_ram_below_4g,
NULL, NULL, NULL);
}
static void pc_machine_class_init(ObjectClass *oc, void *data)
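The property registered in pc_machine_initfn() is read back through the normal QOM accessor; a minimal sketch, mirroring the call added in the Xen hunk further down:

    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                    PC_MACHINE_MAX_RAM_BELOW_4G,
                                                    &error_abort);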


@ -48,6 +48,7 @@
#include "exec/address-spaces.h"
#include "hw/acpi/acpi.h"
#include "cpu.h"
#include "qemu/error-report.h"
#ifdef CONFIG_XEN
# include <xen/hvm/hvm_info_table.h>
#endif
@ -98,8 +99,44 @@ static void pc_init1(MachineState *machine,
DeviceState *icc_bridge;
FWCfgState *fw_cfg = NULL;
PcGuestInfo *guest_info;
ram_addr_t lowmem;
if (xen_enabled() && xen_hvm_init(&ram_memory) != 0) {
/* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory).
* If it doesn't, we need to split it in chunks below and above 4G.
* In any case, try to make sure that guest addresses aligned at
* 1G boundaries get mapped to host addresses aligned at 1G boundaries.
* For old machine types, use whatever split we used historically to avoid
* breaking migration.
*/
if (machine->ram_size >= 0xe0000000) {
lowmem = gigabyte_align ? 0xc0000000 : 0xe0000000;
} else {
lowmem = 0xe0000000;
}
/* Handle the machine opt max-ram-below-4g. It is basically doing
* min(qemu limit, user limit).
*/
if (lowmem > pc_machine->max_ram_below_4g) {
lowmem = pc_machine->max_ram_below_4g;
if (machine->ram_size - lowmem > lowmem &&
lowmem & ((1ULL << 30) - 1)) {
error_report("Warning: Large machine and max_ram_below_4g(%"PRIu64
") not a multiple of 1G; possible bad performance.",
pc_machine->max_ram_below_4g);
}
}
if (machine->ram_size >= lowmem) {
above_4g_mem_size = machine->ram_size - lowmem;
below_4g_mem_size = lowmem;
} else {
above_4g_mem_size = 0;
below_4g_mem_size = machine->ram_size;
}
if (xen_enabled() && xen_hvm_init(&below_4g_mem_size, &above_4g_mem_size,
&ram_memory) != 0) {
fprintf(stderr, "xen hardware virtual machine initialisation failed\n");
exit(1);
}
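Worked example of the split above (values illustrative): with -m 6G on a gigabyte-aligned machine type, lowmem starts at 0xc0000000 (3G); if the user also passes max-ram-below-4g=2G, lowmem is clamped to 2G, giving below_4g_mem_size = 2G and above_4g_mem_size = 4G.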
@ -114,22 +151,6 @@ static void pc_init1(MachineState *machine,
kvmclock_create();
}
/* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory).
* If it doesn't, we need to split it in chunks below and above 4G.
* In any case, try to make sure that guest addresses aligned at
* 1G boundaries get mapped to host addresses aligned at 1G boundaries.
* For old machine types, use whatever split we used historically to avoid
* breaking migration.
*/
if (machine->ram_size >= 0xe0000000) {
ram_addr_t lowmem = gigabyte_align ? 0xc0000000 : 0xe0000000;
above_4g_mem_size = machine->ram_size - lowmem;
below_4g_mem_size = lowmem;
} else {
above_4g_mem_size = 0;
below_4g_mem_size = machine->ram_size;
}
if (pci_enabled) {
pci_memory = g_new(MemoryRegion, 1);
memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);


@ -44,6 +44,7 @@
#include "hw/ide/ahci.h"
#include "hw/usb.h"
#include "hw/cpu/icc_bus.h"
#include "qemu/error-report.h"
/* ICH9 AHCI has 6 ports */
#define MAX_SATA_PORTS 6
@ -85,8 +86,46 @@ static void pc_q35_init(MachineState *machine)
PCIDevice *ahci;
DeviceState *icc_bridge;
PcGuestInfo *guest_info;
ram_addr_t lowmem;
if (xen_enabled() && xen_hvm_init(&ram_memory) != 0) {
/* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory
* and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping
* also known as MMCFG).
* If it doesn't, we need to split it in chunks below and above 4G.
* In any case, try to make sure that guest addresses aligned at
* 1G boundaries get mapped to host addresses aligned at 1G boundaries.
* For old machine types, use whatever split we used historically to avoid
* breaking migration.
*/
if (machine->ram_size >= 0xb0000000) {
lowmem = gigabyte_align ? 0x80000000 : 0xb0000000;
} else {
lowmem = 0xb0000000;
}
/* Handle the machine opt max-ram-below-4g. It is basically doing
* min(qemu limit, user limit).
*/
if (lowmem > pc_machine->max_ram_below_4g) {
lowmem = pc_machine->max_ram_below_4g;
if (machine->ram_size - lowmem > lowmem &&
lowmem & ((1ULL << 30) - 1)) {
error_report("Warning: Large machine and max_ram_below_4g(%"PRIu64
") not a multiple of 1G; possible bad performance.",
pc_machine->max_ram_below_4g);
}
}
if (machine->ram_size >= lowmem) {
above_4g_mem_size = machine->ram_size - lowmem;
below_4g_mem_size = lowmem;
} else {
above_4g_mem_size = 0;
below_4g_mem_size = machine->ram_size;
}
if (xen_enabled() && xen_hvm_init(&below_4g_mem_size, &above_4g_mem_size,
&ram_memory) != 0) {
fprintf(stderr, "xen hardware virtual machine initialisation failed\n");
exit(1);
}
@ -100,24 +139,6 @@ static void pc_q35_init(MachineState *machine)
kvmclock_create();
/* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory
* and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping
* also known as MMCFG).
* If it doesn't, we need to split it in chunks below and above 4G.
* In any case, try to make sure that guest addresses aligned at
* 1G boundaries get mapped to host addresses aligned at 1G boundaries.
* For old machine types, use whatever split we used historically to avoid
* breaking migration.
*/
if (machine->ram_size >= 0xb0000000) {
ram_addr_t lowmem = gigabyte_align ? 0x80000000 : 0xb0000000;
above_4g_mem_size = machine->ram_size - lowmem;
below_4g_mem_size = lowmem;
} else {
above_4g_mem_size = 0;
below_4g_mem_size = machine->ram_size;
}
/* pci enabled */
if (pci_enabled) {
pci_memory = g_new(MemoryRegion, 1);
@ -388,7 +409,7 @@ static QEMUMachine pc_q35_machine_v1_4 = {
.name = "pc-q35-1.4",
.init = pc_q35_init_1_4,
.compat_props = (GlobalProperty[]) {
PC_COMPAT_1_4,
PC_Q35_COMPAT_1_4,
{ /* end of list */ }
},
};


@ -175,6 +175,8 @@ e1000_link_down(E1000State *s)
{
s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
s->phy_reg[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
}
static void
@ -197,23 +199,11 @@ set_phy_ctrl(E1000State *s, int index, uint16_t val)
}
if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
e1000_link_down(s);
s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
DBGOUT(PHY, "Start link auto negotiation\n");
timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}
}
static void
e1000_autoneg_timer(void *opaque)
{
E1000State *s = opaque;
if (!qemu_get_queue(s->nic)->link_down) {
e1000_link_up(s);
}
s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
DBGOUT(PHY, "Auto negotiation is completed\n");
}
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
[PHY_CTRL] = set_phy_ctrl,
};
@ -227,7 +217,8 @@ static const char phy_regcap[0x20] = {
[PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
[PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
[PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
[PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
[PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
[PHY_AUTONEG_EXP] = PHY_R,
};
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
@ -344,6 +335,19 @@ set_ics(E1000State *s, int index, uint32_t val)
set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
static void
e1000_autoneg_timer(void *opaque)
{
E1000State *s = opaque;
if (!qemu_get_queue(s->nic)->link_down) {
e1000_link_up(s);
s->phy_reg[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
DBGOUT(PHY, "Auto negotiation is completed\n");
set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
}
}
static int
rxbufsize(uint32_t v)
{
@ -844,6 +848,14 @@ receive_filter(E1000State *s, const uint8_t *buf, int size)
return 0;
}
static bool
have_autoneg(E1000State *s)
{
return (s->compat_flags & E1000_FLAG_AUTONEG) &&
(s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN) &&
(s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG);
}
static void
e1000_set_link_status(NetClientState *nc)
{
@ -853,7 +865,14 @@ e1000_set_link_status(NetClientState *nc)
if (nc->link_down) {
e1000_link_down(s);
} else {
e1000_link_up(s);
if (have_autoneg(s) &&
!(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
/* emulate auto-negotiation if supported */
timer_mod(s->autoneg_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
} else {
e1000_link_up(s);
}
}
if (s->mac_reg[STATUS] != old_status)
@ -1279,19 +1298,13 @@ static void e1000_pre_save(void *opaque)
e1000_mit_timer(s);
}
if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
return;
}
/*
* If link is down and auto-negotiation is ongoing, complete
* auto-negotiation immediately. This allows is to look at
* MII_SR_AUTONEG_COMPLETE to infer link status on load.
* If link is down and auto-negotiation is supported and ongoing,
* complete auto-negotiation immediately. This allows us to look
* at MII_SR_AUTONEG_COMPLETE to infer link status on load.
*/
if (nc->link_down &&
s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
if (nc->link_down && have_autoneg(s)) {
s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
}
}
@ -1313,15 +1326,11 @@ static int e1000_post_load(void *opaque, int version_id)
* Alternatively, restart link negotiation if it was in progress. */
nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
return 0;
}
if (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
if (have_autoneg(s) &&
!(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
nc->link_down = false;
timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
timer_mod(s->autoneg_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}
return 0;
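As a concrete (hypothetical) monitor sequence: after 'set_link <nic> off' followed by 'set_link <nic> on', a guest with auto-negotiation enabled no longer sees the link come up instantly; the autoneg timer fires about 500 ms later, sets MII_SR_AUTONEG_COMPLETE and MII_LPAR_LPACK, and signals the guest with an LSC interrupt via set_ics().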


@ -384,6 +384,9 @@
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* PHY Link Partner Ability Register */
#define MII_LPAR_LPACK 0x4000 /* Acked by link partner */
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */


@ -180,6 +180,12 @@ PCIESlot *ioh3420_init(PCIBus *bus, int devfn, bool multifunction,
return PCIE_SLOT(d);
}
static Property ioh3420_props[] = {
DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present,
QEMU_PCIE_SLTCAP_PCP_BITNR, true),
DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription vmstate_ioh3420 = {
.name = "ioh-3240-express-root-port",
.version_id = 1,
@ -210,6 +216,7 @@ static void ioh3420_class_init(ObjectClass *klass, void *data)
dc->desc = "Intel IOH device id 3420 PCIE Root Port";
dc->reset = ioh3420_reset;
dc->vmsd = &vmstate_ioh3420;
dc->props = ioh3420_props;
}
static const TypeInfo ioh3420_info = {


@ -147,6 +147,12 @@ PCIESlot *xio3130_downstream_init(PCIBus *bus, int devfn, bool multifunction,
return PCIE_SLOT(d);
}
static Property xio3130_downstream_props[] = {
DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present,
QEMU_PCIE_SLTCAP_PCP_BITNR, true),
DEFINE_PROP_END_OF_LIST()
};
static const VMStateDescription vmstate_xio3130_downstream = {
.name = "xio3130-express-downstream-port",
.version_id = 1,
@ -177,6 +183,7 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data)
dc->desc = "TI X3130 Downstream Port of PCI Express Switch";
dc->reset = xio3130_downstream_reset;
dc->vmsd = &vmstate_xio3130_downstream;
dc->props = xio3130_downstream_props;
}
static const TypeInfo xio3130_downstream_info = {


@ -224,7 +224,7 @@ static void pcie_cap_slot_hotplug_common(PCIDevice *hotplug_dev,
*exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
uint16_t sltsta = pci_get_word(*exp_cap + PCI_EXP_SLTSTA);
PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: %d\n", state);
PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: 0x%x\n", sltsta);
if (sltsta & PCI_EXP_SLTSTA_EIS) {
/* the slot is electromechanically locked.
* This error is propagated up to qdev and then to HMP/QMP.
@ -258,7 +258,8 @@ void pcie_cap_slot_hotplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDS);
pcie_cap_slot_event(PCI_DEVICE(hotplug_dev), PCI_EXP_HP_EV_PDC);
pcie_cap_slot_event(PCI_DEVICE(hotplug_dev),
PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP);
}
void pcie_cap_slot_hot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
@ -268,10 +269,7 @@ void pcie_cap_slot_hot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
pcie_cap_slot_hotplug_common(PCI_DEVICE(hotplug_dev), dev, &exp_cap, errp);
object_unparent(OBJECT(dev));
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDS);
pcie_cap_slot_event(PCI_DEVICE(hotplug_dev), PCI_EXP_HP_EV_PDC);
pcie_cap_slot_push_attention_button(PCI_DEVICE(hotplug_dev));
}
/* pci express slot for pci express root/downstream port
@ -294,6 +292,15 @@ void pcie_cap_slot_init(PCIDevice *dev, uint16_t slot)
PCI_EXP_SLTCAP_AIP |
PCI_EXP_SLTCAP_ABP);
if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
PCI_EXP_SLTCAP_PCP);
pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PCC);
pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PCC);
}
pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PIC |
PCI_EXP_SLTCTL_AIC);
@ -327,6 +334,10 @@ void pcie_cap_slot_init(PCIDevice *dev, uint16_t slot)
void pcie_cap_slot_reset(PCIDevice *dev)
{
uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
uint8_t port_type = pcie_cap_get_type(dev);
assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
port_type == PCI_EXP_TYPE_ROOT_PORT);
PCIE_DEV_PRINTF(dev, "reset\n");
@ -339,9 +350,25 @@ void pcie_cap_slot_reset(PCIDevice *dev)
PCI_EXP_SLTCTL_PDCE |
PCI_EXP_SLTCTL_ABPE);
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PIC_OFF |
PCI_EXP_SLTCTL_AIC_OFF);
if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
/* Downstream ports enforce device number 0. */
bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
uint16_t pic;
if (populated) {
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PCC);
} else {
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PCC);
}
pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF;
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
}
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_EIS |/* on reset,
the lock is released */
@ -352,6 +379,11 @@ void pcie_cap_slot_reset(PCIDevice *dev)
hotplug_event_update_event_status(dev);
}
static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
object_unparent(OBJECT(dev));
}
void pcie_cap_slot_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len)
{
@ -376,6 +408,22 @@ void pcie_cap_slot_write_config(PCIDevice *dev,
sltsta);
}
/*
* If the slot is populated, power indicator is off and power
* controller is off, it is safe to detach the devices.
*/
if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
pcie_unplug_device, NULL);
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDS);
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDC);
}
hotplug_event_notify(dev);
/*
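Sketch of the resulting hot-unplug flow, using only the names from the hunks above: pcie_cap_slot_hot_unplug_cb() now merely pushes the attention button instead of detaching the device immediately; once the guest responds by writing PCI_EXP_SLTCTL_PCC with the power indicator off, the write handler above unplugs every device on the secondary bus and reports PCI_EXP_SLTSTA_PDC.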


@ -20,6 +20,7 @@
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
static void vhost_dev_sync_region(struct vhost_dev *dev,
MemoryRegionSection *section,
@ -304,7 +305,9 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
uint64_t size)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
int r = 0;
for (i = 0; !r && i < dev->nvqs; ++i) {
struct vhost_virtqueue *vq = dev->vqs + i;
hwaddr l;
void *p;
@ -316,15 +319,15 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
if (!p || l != vq->ring_size) {
fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
return -ENOMEM;
r = -ENOMEM;
}
if (p != vq->ring) {
fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
return -EBUSY;
r = -EBUSY;
}
cpu_physical_memory_unmap(p, l, 0, 0);
}
return 0;
return r;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
@ -854,6 +857,12 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
.eventfd_del = vhost_eventfd_del,
.priority = 10
};
hdev->migration_blocker = NULL;
if (!(hdev->features & (0x1 << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
migrate_add_blocker(hdev->migration_blocker);
}
hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
hdev->n_mem_sections = 0;
hdev->mem_sections = NULL;
@ -882,6 +891,10 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
vhost_virtqueue_cleanup(hdev->vqs + i);
}
memory_listener_unregister(&hdev->memory_listener);
if (hdev->migration_blocker) {
migrate_del_blocker(hdev->migration_blocker);
error_free(hdev->migration_blocker);
}
g_free(hdev->mem);
g_free(hdev->mem_sections);
hdev->vhost_ops->vhost_backend_cleanup(hdev);
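A practical consequence (inferred from migrate_add_blocker semantics): attempting to migrate a guest whose vhost backend does not advertise VHOST_F_LOG_ALL now fails up front with the message above, rather than migrating without dirty-log tracking.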


@ -976,6 +976,8 @@ static void virtio_pci_device_plugged(DeviceState *d)
if (proxy->nvectors &&
msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
}


@ -33,10 +33,13 @@ struct PCMachineState {
MemoryRegion hotplug_memory;
HotplugHandler *acpi_dev;
uint64_t max_ram_below_4g;
};
#define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device"
#define PC_MACHINE_MEMHP_REGION_SIZE "hotplug-memory-region-size"
#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g"
/**
* PCMachineClass:
@ -297,8 +300,16 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.driver = "ICH9-LPC",\
.property = "memory-hotplug-support",\
.value = "off",\
},{\
.driver = "xio3130-downstream",\
.property = COMPAT_PROP_PCP,\
.value = "off",\
},{\
.driver = "ioh3420",\
.property = COMPAT_PROP_PCP,\
.value = "off",\
}
#define PC_Q35_COMPAT_1_7 \
PC_COMPAT_1_7, \
PC_Q35_COMPAT_2_0, \


@ -158,6 +158,9 @@ enum {
QEMU_PCI_CAP_SHPC = (1 << QEMU_PCI_SHPC_BITNR),
#define QEMU_PCI_SLOTID_BITNR 6
QEMU_PCI_CAP_SLOTID = (1 << QEMU_PCI_SLOTID_BITNR),
/* PCI Express capability - Power Controller Present */
#define QEMU_PCIE_SLTCAP_PCP_BITNR 7
QEMU_PCIE_SLTCAP_PCP = (1 << QEMU_PCIE_SLTCAP_PCP_BITNR),
};
#define TYPE_PCI_DEVICE "pci-device"


@ -76,6 +76,8 @@ struct PCIExpressDevice {
PCIEAERLog aer_log;
};
#define COMPAT_PROP_PCP "power_controller_present"
/* PCI express capability helper functions */
int pcie_cap_init(PCIDevice *dev, uint8_t offset, uint8_t type, uint8_t port);
int pcie_endpoint_cap_init(PCIDevice *dev, uint8_t offset);


@ -57,6 +57,8 @@
#define PCI_EXP_SLTCTL_PIC_SHIFT (ffs(PCI_EXP_SLTCTL_PIC) - 1)
#define PCI_EXP_SLTCTL_PIC_OFF \
(PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT)
#define PCI_EXP_SLTCTL_PIC_ON \
(PCI_EXP_SLTCTL_IND_ON << PCI_EXP_SLTCTL_PIC_SHIFT)
#define PCI_EXP_SLTCTL_SUPPORTED \
(PCI_EXP_SLTCTL_ABPE | \


@ -45,6 +45,7 @@ struct vhost_dev {
bool log_enabled;
vhost_log_chunk_t *log;
unsigned long long log_size;
Error *migration_blocker;
bool force;
bool memory_changed;
hwaddr mem_changed_start_addr;


@ -37,10 +37,11 @@ void xen_cmos_set_s3_resume(void *opaque, int irq, int level);
qemu_irq *xen_interrupt_controller_init(void);
int xen_init(MachineClass *mc);
int xen_hvm_init(MemoryRegion **ram_memory);
void xenstore_store_pv_console_info(int i, struct CharDriverState *chr);
#if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
MemoryRegion **ram_memory);
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr);
void xen_modified_memory(ram_addr_t start, ram_addr_t length);


@ -2968,7 +2968,7 @@ static mon_cmd_t info_cmds[] = {
.name = "memdev",
.args_type = "",
.params = "",
.help = "show the memory device",
.help = "show memory backends",
.mhandler.cmd = hmp_info_memdev,
},
{


@ -3158,19 +3158,19 @@
##
# @Memdev:
#
# Information of memory device
# Information about memory backend
#
# @size: memory device size
# @size: memory backend size
#
# @merge: enables or disables memory merge support
#
# @dump: includes memory device's memory in a core dump or not
# @dump: includes memory backend's memory in a core dump or not
#
# @prealloc: enables or disables memory preallocation
#
# @host-nodes: host nodes for its memory policy
#
# @policy: memory policy of memory device
# @policy: memory policy of memory backend
#
# Since: 2.1
##
@ -3187,13 +3187,15 @@
##
# @query-memdev:
#
# Returns information for all memory devices.
# Returns information for all memory backends.
#
# Returns: a list of @Memdev.
#
# Since: 2.1
##
{ 'command': 'query-memdev', 'returns': ['Memdev'] }
##
# @PCDIMMDeviceInfo:
#
# PCDIMMDevice state information


@ -98,7 +98,7 @@ static void format_string(StringOutputVisitor *sov, Range *r, bool next,
{
if (r->end - r->begin > 1) {
if (human) {
g_string_append_printf(sov->string, "0x%" PRIx64 "-%" PRIx64,
g_string_append_printf(sov->string, "0x%" PRIx64 "-0x%" PRIx64,
r->begin, r->end - 1);
} else {
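With this fix a host-nodes range in the human-readable output prints as, e.g., '0x0-0x3' instead of the previous mixed form '0x0-3' (example values illustrative).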


@ -204,7 +204,7 @@ void qemu_chr_be_write(CharDriverState *s, uint8_t *buf, int len)
int qemu_chr_fe_get_msgfd(CharDriverState *s)
{
int fd;
return (qemu_chr_fe_get_msgfds(s, &fd, 1) >= 0) ? fd : -1;
return (qemu_chr_fe_get_msgfds(s, &fd, 1) == 1) ? fd : -1;
}
int qemu_chr_fe_get_msgfds(CharDriverState *s, int *fds, int len)
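The '== 1' test matters because qemu_chr_fe_get_msgfds() returns the number of fds actually received: with the old '>= 0' check, a return of 0 would have handed back an uninitialized fd as if it were valid.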
@ -2481,8 +2481,15 @@ static int tcp_get_msgfds(CharDriverState *chr, int *fds, int num)
int to_copy = (s->read_msgfds_num < num) ? s->read_msgfds_num : num;
if (to_copy) {
int i;
memcpy(fds, s->read_msgfds, to_copy * sizeof(int));
/* Close unused fds */
for (i = to_copy; i < s->read_msgfds_num; i++) {
close(s->read_msgfds[i]);
}
g_free(s->read_msgfds);
s->read_msgfds = 0;
s->read_msgfds_num = 0;


@ -1,5 +1,6 @@
stub-obj-y += arch-query-cpu-def.o
stub-obj-y += bdrv-commit-all.o
stub-obj-y += chr-baum-init.o
stub-obj-y += chr-msmouse.o
stub-obj-y += clock-warp.o
stub-obj-y += cpu-get-clock.o
@ -24,6 +25,7 @@ stub-obj-y += mon-set-error.o
stub-obj-y += monitor-init.o
stub-obj-y += notify-event.o
stub-obj-y += pci-drive-hot-add.o
stub-obj-$(CONFIG_SPICE) += qemu-chr-open-spice.o
stub-obj-y += qtest.o
stub-obj-y += reset.o
stub-obj-y += runstate-check.o

stubs/chr-baum-init.c (new file)

@ -0,0 +1,7 @@
#include "qemu-common.h"
#include "sysemu/char.h"
CharDriverState *chr_baum_init(void)
{
return NULL;
}


@ -0,0 +1,14 @@
#include "qemu-common.h"
#include "ui/qemu-spice.h"
CharDriverState *qemu_chr_open_spice_vmc(const char *type)
{
return NULL;
}
#if SPICE_SERVER_VERSION >= 0x000c02
CharDriverState *qemu_chr_open_spice_port(const char *name)
{
return NULL;
}
#endif


@ -8,17 +8,30 @@
*
*/
#define QEMU_GLIB_COMPAT_H
#include <glib.h>
#include "libqtest.h"
#include "qemu/option.h"
#include "sysemu/char.h"
#include "sysemu/sysemu.h"
#include <glib.h>
#include <linux/vhost.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <qemu/sockets.h>
/* GLIB version compatibility flags */
#if GLIB_CHECK_VERSION(2, 28, 0)
#define HAVE_MONOTONIC_TIME
#endif
#if GLIB_CHECK_VERSION(2, 32, 0)
#define HAVE_MUTEX_INIT
#define HAVE_COND_INIT
#define HAVE_THREAD_NEW
#endif
#define QEMU_CMD_ACCEL " -machine accel=tcg"
#define QEMU_CMD_MEM " -m 512 -object memory-backend-file,id=mem,size=512M,"\
"mem-path=%s,share=on -numa node,memdev=mem"
@ -95,8 +108,93 @@ static VhostUserMsg m __attribute__ ((unused));
int fds_num = 0, fds[VHOST_MEMORY_MAX_NREGIONS];
static VhostUserMemory memory;
static GMutex data_mutex;
static GCond data_cond;
static GMutex *data_mutex;
static GCond *data_cond;
static gint64 _get_time(void)
{
#ifdef HAVE_MONOTONIC_TIME
return g_get_monotonic_time();
#else
GTimeVal time;
g_get_current_time(&time);
return time.tv_sec * G_TIME_SPAN_SECOND + time.tv_usec;
#endif
}
static GMutex *_mutex_new(void)
{
GMutex *mutex;
#ifdef HAVE_MUTEX_INIT
mutex = g_new(GMutex, 1);
g_mutex_init(mutex);
#else
mutex = g_mutex_new();
#endif
return mutex;
}
static void _mutex_free(GMutex *mutex)
{
#ifdef HAVE_MUTEX_INIT
g_mutex_clear(mutex);
g_free(mutex);
#else
g_mutex_free(mutex);
#endif
}
static GCond *_cond_new(void)
{
GCond *cond;
#ifdef HAVE_COND_INIT
cond = g_new(GCond, 1);
g_cond_init(cond);
#else
cond = g_cond_new();
#endif
return cond;
}
static gboolean _cond_wait_until(GCond *cond, GMutex *mutex, gint64 end_time)
{
gboolean ret = FALSE;
#ifdef HAVE_COND_INIT
ret = g_cond_wait_until(cond, mutex, end_time);
#else
GTimeVal time = { end_time / G_TIME_SPAN_SECOND,
end_time % G_TIME_SPAN_SECOND };
ret = g_cond_timed_wait(cond, mutex, &time);
#endif
return ret;
}
static void _cond_free(GCond *cond)
{
#ifdef HAVE_COND_INIT
g_cond_clear(cond);
g_free(cond);
#else
g_cond_free(cond);
#endif
}
static GThread *_thread_new(const gchar *name, GThreadFunc func, gpointer data)
{
GThread *thread = NULL;
GError *error = NULL;
#ifdef HAVE_THREAD_NEW
thread = g_thread_try_new(name, func, data, &error);
#else
thread = g_thread_create(func, data, TRUE, &error);
#endif
return thread;
}
static void read_guest_mem(void)
{
@ -104,11 +202,11 @@ static void read_guest_mem(void)
gint64 end_time;
int i, j;
g_mutex_lock(&data_mutex);
g_mutex_lock(data_mutex);
end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
end_time = _get_time() + 5 * G_TIME_SPAN_SECOND;
while (!fds_num) {
if (!g_cond_wait_until(&data_cond, &data_mutex, end_time)) {
if (!_cond_wait_until(data_cond, data_mutex, end_time)) {
/* timeout has passed */
g_assert(fds_num);
break;
@ -143,7 +241,7 @@ static void read_guest_mem(void)
}
g_assert_cmpint(1, ==, 1);
g_mutex_unlock(&data_mutex);
g_mutex_unlock(data_mutex);
}
static void *thread_function(void *data)
@ -171,6 +269,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
return;
}
g_mutex_lock(data_mutex);
memcpy(p, buf, VHOST_USER_HDR_SIZE);
if (msg.size) {
@ -203,8 +302,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
fds_num = qemu_chr_fe_get_msgfds(chr, fds, sizeof(fds) / sizeof(int));
/* signal the test that it can continue */
g_cond_signal(&data_cond);
g_mutex_unlock(&data_mutex);
g_cond_signal(data_cond);
break;
case VHOST_USER_SET_VRING_KICK:
@ -221,6 +319,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
default:
break;
}
g_mutex_unlock(data_mutex);
}
static const char *init_hugepagefs(void)
@ -285,10 +384,9 @@ int main(int argc, char **argv)
qemu_chr_add_handlers(chr, chr_can_read, chr_read, NULL, chr);
/* run the main loop thread so the chardev may operate */
g_mutex_init(&data_mutex);
g_cond_init(&data_cond);
g_mutex_lock(&data_mutex);
g_thread_new(NULL, thread_function, NULL);
data_mutex = _mutex_new();
data_cond = _cond_new();
_thread_new(NULL, thread_function, NULL);
qemu_cmd = g_strdup_printf(QEMU_CMD, hugefs, socket_path);
s = qtest_start(qemu_cmd);
@ -305,8 +403,8 @@ int main(int argc, char **argv)
/* cleanup */
unlink(socket_path);
g_free(socket_path);
g_cond_clear(&data_cond);
g_mutex_clear(&data_mutex);
_cond_free(data_cond);
_mutex_free(data_mutex);
return ret;
}

vl.c

@ -380,6 +380,10 @@ static QemuOptsList qemu_machine_opts = {
.name = "kvm-type",
.type = QEMU_OPT_STRING,
.help = "Specifies the KVM virtualization mode (HV, PR)",
},{
.name = PC_MACHINE_MAX_RAM_BELOW_4G,
.type = QEMU_OPT_SIZE,
.help = "maximum ram below the 4G boundary (32bit boundary)",
},
{ /* End of list */ }
},


@ -51,7 +51,8 @@ void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
}
int xen_hvm_init(MemoryRegion **ram_memory)
int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
MemoryRegion **ram_memory)
{
return 0;
}


@ -155,30 +155,43 @@ qemu_irq *xen_interrupt_controller_init(void)
/* Memory Ops */
static void xen_ram_init(ram_addr_t ram_size, MemoryRegion **ram_memory_p)
static void xen_ram_init(ram_addr_t *below_4g_mem_size,
ram_addr_t *above_4g_mem_size,
ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
MemoryRegion *sysmem = get_system_memory();
ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
ram_addr_t block_len;
uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
PC_MACHINE_MAX_RAM_BELOW_4G,
&error_abort);
block_len = ram_size;
if (ram_size >= HVM_BELOW_4G_RAM_END) {
/* Xen does not allocate the memory continuously, and keep a hole at
* HVM_BELOW_4G_MMIO_START of HVM_BELOW_4G_MMIO_LENGTH
/* Handle the machine opt max-ram-below-4g. It is basically doing
* min(xen limit, user limit).
*/
if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
user_lowmem = HVM_BELOW_4G_RAM_END;
}
if (ram_size >= user_lowmem) {
*above_4g_mem_size = ram_size - user_lowmem;
*below_4g_mem_size = user_lowmem;
} else {
*above_4g_mem_size = 0;
*below_4g_mem_size = ram_size;
}
if (!*above_4g_mem_size) {
block_len = ram_size;
} else {
/*
* Xen does not allocate the memory continuously, it keeps a
* hole of the size computed above or passed in.
*/
block_len += HVM_BELOW_4G_MMIO_LENGTH;
block_len = (1ULL << 32) + *above_4g_mem_size;
}
memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len);
*ram_memory_p = &ram_memory;
vmstate_register_ram_global(&ram_memory);
if (ram_size >= HVM_BELOW_4G_RAM_END) {
above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
below_4g_mem_size = HVM_BELOW_4G_RAM_END;
} else {
below_4g_mem_size = ram_size;
}
memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
&ram_memory, 0, 0xa0000);
memory_region_add_subregion(sysmem, 0, &ram_640k);
@ -189,12 +202,13 @@ static void xen_ram_init(ram_addr_t ram_size, MemoryRegion **ram_memory_p)
* the Options ROM, so it is registered here as RAM.
*/
memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
&ram_memory, 0xc0000, below_4g_mem_size - 0xc0000);
&ram_memory, 0xc0000,
*below_4g_mem_size - 0xc0000);
memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
if (above_4g_mem_size > 0) {
if (*above_4g_mem_size > 0) {
memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
&ram_memory, 0x100000000ULL,
above_4g_mem_size);
*above_4g_mem_size);
memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
}
}
@ -958,7 +972,8 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}
int xen_hvm_init(MemoryRegion **ram_memory)
int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
MemoryRegion **ram_memory)
{
int i, rc;
unsigned long ioreq_pfn;
@ -1036,7 +1051,7 @@ int xen_hvm_init(MemoryRegion **ram_memory)
/* Init RAM management */
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
xen_ram_init(ram_size, ram_memory);
xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory);
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);