virtio,pc,acpi fixes, cleanups

Fixes all over the place.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJV8UpiAAoJECgfDbjSjVRp3RUH/AuDtZqyRoQuvK+Ec3faN5sk
 LWChCpaVe+UqoIUT3gC6SnIpJrbJu8Kd9SppiQQALyDZctJZ+UbQ8EhcUdi+nZz6
 ZLjMog5GO246O5NA7LeBJu/099IACRAMdv/upG1liJ9e6O1jtkOwl+y9/845hibG
 XDfwPQL9YCpBbQTH/R7/wxUW1yYqEPDr/gMDumB2YDn5Zvz7PGE8+In004nQkbxF
 RTQN3ZbnvM3u0Iup4sbhxN9GiquwTepUPTBeTjoKwLEivi+hXjR12JputBL5d5pd
 Fb9O+0xJOdFdm1DI+W+KnIOVPGX9mNBY93NHiIwWbIa34iAyWcQStL2Y4lHeNDY=
 =9CYg
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

virtio,pc,acpi fixes, cleanups

Fixes all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 10 Sep 2015 10:16:18 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  hw/pci: fix pci_update_mappings() trace events
  pc: memhotplug: keep reserved-memory-end broken on 2.4 and earlier machines
  pc: memhotplug: fix incorrectly set reserved-memory-end
  acpi: Remove unused definition.
  virtio: avoid leading underscores for helpers
  pc: Remove redundant arguments from xen_hvm_init()
  pci: Fix pci_device_iommu_address_space() bus propagation

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2015-09-10 10:24:30 +01:00
commit fbf054cb0a
21 changed files with 86 additions and 82 deletions


@@ -731,7 +731,7 @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
- if (__virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
+ if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
if (s->conf.scsi) {
error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
return 0;
@@ -782,10 +782,11 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
*
* s->blk would erroneously be placed in writethrough mode.
*/
- if (!virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
aio_context_acquire(blk_get_aio_context(s->blk));
blk_set_enable_write_cache(s->blk,
- virtio_has_feature(vdev, VIRTIO_BLK_F_WCE));
+ virtio_vdev_has_feature(vdev,
+ VIRTIO_BLK_F_WCE));
aio_context_release(blk_get_aio_context(s->blk));
}
}


@@ -76,7 +76,7 @@ static VirtIOSerialPort *find_port_by_name(char *name)
static bool use_multiport(VirtIOSerial *vser)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
- return virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT);
+ return virtio_vdev_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT);
}
static size_t write_to_port(VirtIOSerialPort *port,


@@ -1412,7 +1412,13 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
if (guest_info->has_reserved_memory && pcms->hotplug_memory.base) {
uint64_t *val = g_malloc(sizeof(*val));
- *val = cpu_to_le64(ROUND_UP(pcms->hotplug_memory.base, 0x1ULL << 30));
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
+ uint64_t res_mem_end = pcms->hotplug_memory.base;
+ if (!pcmc->broken_reserved_end) {
+ res_mem_end += memory_region_size(&pcms->hotplug_memory.mr);
+ }
+ *val = cpu_to_le64(ROUND_UP(res_mem_end, 0x1ULL << 30));
fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val));
}
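
(Illustration, not part of the patch.) With hypothetical sizes — a hotplug region starting at 5 GiB and 2 GiB large — the old code rounded up only the base, so the etc/reserved-memory-end fw_cfg file still pointed inside the hotpluggable range; the fixed code adds the region size first. A stand-alone sketch of the computation, with a ROUND_UP macro equivalent to QEMU's for power-of-two alignments:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GIB (1ULL << 30)
    /* Same result as QEMU's ROUND_UP() for power-of-two alignments. */
    #define ROUND_UP(n, d) (((n) + (d) - 1) & ~((d) - 1))

    int main(void)
    {
        uint64_t base = 5 * GIB;   /* pcms->hotplug_memory.base (hypothetical) */
        uint64_t size = 2 * GIB;   /* memory_region_size() of the hotplug region */

        uint64_t old_end = ROUND_UP(base, GIB);        /* broken: 0x140000000 */
        uint64_t new_end = ROUND_UP(base + size, GIB); /* fixed:  0x1c0000000 */

        printf("broken reserved-memory-end: 0x%" PRIx64 "\n", old_end);
        printf("fixed  reserved-memory-end: 0x%" PRIx64 "\n", new_end);
        return 0;
    }

Machine types 2.4 and older keep the old value via pcmc->broken_reserved_end (set in the pc_i440fx_2_4/pc_q35_2_4 hunks below), so the value those guests already see does not change.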


@@ -134,9 +134,7 @@ static void pc_init1(MachineState *machine)
pcms->below_4g_mem_size = machine->ram_size;
}
- if (xen_enabled() && xen_hvm_init(&pcms->below_4g_mem_size,
- &pcms->above_4g_mem_size,
- &ram_memory) != 0) {
+ if (xen_enabled() && xen_hvm_init(pcms, &ram_memory) != 0) {
fprintf(stderr, "xen hardware virtual machine initialisation failed\n");
exit(1);
}
@@ -451,7 +449,9 @@ static void pc_i440fx_machine_options(MachineClass *m)
static void pc_i440fx_2_4_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_machine_options(m);
+ pcmc->broken_reserved_end = true;
m->default_machine_opts = "firmware=bios-256k.bin";
m->default_display = "std";
m->alias = "pc";


@@ -125,9 +125,7 @@ static void pc_q35_init(MachineState *machine)
pcms->below_4g_mem_size = machine->ram_size;
}
- if (xen_enabled() && xen_hvm_init(&pcms->below_4g_mem_size,
- &pcms->above_4g_mem_size,
- &ram_memory) != 0) {
+ if (xen_enabled() && xen_hvm_init(pcms, &ram_memory) != 0) {
fprintf(stderr, "xen hardware virtual machine initialisation failed\n");
exit(1);
}
@@ -370,7 +368,9 @@ static void pc_q35_machine_options(MachineClass *m)
static void pc_q35_2_4_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_machine_options(m);
+ pcmc->broken_reserved_end = true;
m->default_machine_opts = "firmware=bios-256k.bin";
m->default_display = "std";
m->no_floppy = 1;


@@ -197,7 +197,7 @@ static int vhost_net_set_vnet_endian(VirtIODevice *dev, NetClientState *peer,
{
int r = 0;
- if (virtio_has_feature(dev, VIRTIO_F_VERSION_1) ||
+ if (virtio_vdev_has_feature(dev, VIRTIO_F_VERSION_1) ||
(virtio_legacy_is_cross_endian(dev) && !virtio_is_big_endian(dev))) {
r = qemu_set_vnet_le(peer, set);
if (r) {


@@ -86,8 +86,8 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
memcpy(&netcfg, config, n->config_size);
- if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
- !virtio_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
memcpy(n->mac, netcfg.mac, ETH_ALEN);
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
@@ -304,7 +304,7 @@ static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
info->multicast_table = str_list;
info->vlan_table = get_vlan_table(n);
- if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
info->vlan = RX_STATE_ALL;
} else if (!info->vlan_table) {
info->vlan = RX_STATE_NONE;
@@ -529,13 +529,13 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
int i;
virtio_net_set_multiqueue(n,
- __virtio_has_feature(features, VIRTIO_NET_F_MQ));
+ virtio_has_feature(features, VIRTIO_NET_F_MQ));
virtio_net_set_mrg_rx_bufs(n,
- __virtio_has_feature(features,
- VIRTIO_NET_F_MRG_RXBUF),
- __virtio_has_feature(features,
- VIRTIO_F_VERSION_1));
+ virtio_has_feature(features,
+ VIRTIO_NET_F_MRG_RXBUF),
+ virtio_has_feature(features,
+ VIRTIO_F_VERSION_1));
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
@@ -552,7 +552,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
vhost_net_ack_features(get_vhost_net(nc->peer), features);
}
- if (__virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
+ if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
memset(n->vlans, 0, MAX_VLAN >> 3);
} else {
memset(n->vlans, 0xff, MAX_VLAN >> 3);
@@ -599,7 +599,7 @@ static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
uint64_t offloads;
size_t s;
- if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
return VIRTIO_NET_ERR;
}
@@ -1449,7 +1449,7 @@ static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
}
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
qemu_put_be64(f, n->curr_guest_offloads);
}
}
@@ -1475,7 +1475,8 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
n->vqs[0].tx_waiting = qemu_get_be32(f);
virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
- virtio_has_feature(vdev, VIRTIO_F_VERSION_1));
+ virtio_vdev_has_feature(vdev,
+ VIRTIO_F_VERSION_1));
if (version_id >= 3)
n->status = qemu_get_be16(f);
@@ -1558,7 +1559,7 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
}
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
n->curr_guest_offloads = qemu_get_be64(f);
} else {
n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
@@ -1585,8 +1586,8 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
- virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
+ virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
n->announce_counter = SELF_ANNOUNCE_ROUNDS;
timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
}


@@ -1154,16 +1154,16 @@ static void pci_update_mappings(PCIDevice *d)
/* now do the real mapping */
if (r->addr != PCI_BAR_UNMAPPED) {
trace_pci_update_mappings_del(d, pci_bus_num(d->bus),
- PCI_FUNC(d->devfn),
PCI_SLOT(d->devfn),
+ PCI_FUNC(d->devfn),
i, r->addr, r->size);
memory_region_del_subregion(r->address_space, r->memory);
}
r->addr = new_addr;
if (r->addr != PCI_BAR_UNMAPPED) {
trace_pci_update_mappings_add(d, pci_bus_num(d->bus),
- PCI_FUNC(d->devfn),
PCI_SLOT(d->devfn),
+ PCI_FUNC(d->devfn),
i, r->addr, r->size);
i, r->addr, r->size);
memory_region_add_subregion_overlap(r->address_space,
r->addr, r->memory, 1);
@@ -2383,17 +2383,14 @@ static void pci_device_class_init(ObjectClass *klass, void *data)
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
PCIBus *bus = PCI_BUS(dev->bus);
+ PCIBus *iommu_bus = bus;
- if (bus->iommu_fn) {
- return bus->iommu_fn(bus, bus->iommu_opaque, dev->devfn);
+ while(iommu_bus && !iommu_bus->iommu_fn && iommu_bus->parent_dev) {
+ iommu_bus = PCI_BUS(iommu_bus->parent_dev->bus);
}
- if (bus->parent_dev) {
- /** We are ignoring the bus master DMA bit of the bridge
- * as it would complicate things such as VFIO for no good reason */
- return pci_device_iommu_address_space(bus->parent_dev);
+ if (iommu_bus && iommu_bus->iommu_fn) {
+ return iommu_bus->iommu_fn(bus, iommu_bus->iommu_opaque, dev->devfn);
}
return &address_space_memory;
}
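
(Illustration, not part of the patch.) A stand-alone sketch of the new lookup, using simplified stand-ins for PCIBus/PCIDevice rather than the real QEMU types: the walk climbs the bridge chain until a bus provides iommu_fn, but the callback is still invoked with the original device's bus and devfn — the detail the old recursion through the bridge device lost.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct Bus Bus;
    typedef struct Dev Dev;

    struct Dev { Bus *bus; int devfn; };
    struct Bus { Dev *parent_dev; const char *(*iommu_fn)(Bus *bus, int devfn); };

    static const char *root_iommu(Bus *bus, int devfn)
    {
        (void)bus;
        printf("IOMMU lookup for devfn %d\n", devfn);
        return "iommu-address-space";
    }

    /* Mirrors the new pci_device_iommu_address_space() walk. */
    static const char *iommu_address_space(Dev *dev)
    {
        Bus *bus = dev->bus;
        Bus *iommu_bus = bus;

        while (iommu_bus && !iommu_bus->iommu_fn && iommu_bus->parent_dev) {
            iommu_bus = iommu_bus->parent_dev->bus;
        }
        if (iommu_bus && iommu_bus->iommu_fn) {
            return iommu_bus->iommu_fn(bus, dev->devfn);
        }
        return "system-memory";
    }

    int main(void)
    {
        Bus root = { .parent_dev = NULL, .iommu_fn = root_iommu };
        Dev bridge = { .bus = &root, .devfn = 8 };
        Bus secondary = { .parent_dev = &bridge, .iommu_fn = NULL };
        Dev nic = { .bus = &secondary, .devfn = 3 };

        /* The device behind the bridge still resolves to the root IOMMU,
         * and the IOMMU sees the device's own devfn (3), not the bridge's. */
        printf("%s\n", iommu_address_space(&nic));
        return 0;
    }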


@@ -145,7 +145,7 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
*
* TODO: always disable this workaround for virtio 1.0 devices.
*/
- if (!virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
if (req->elem.out_num) {
req_size = req->elem.out_sg[0].iov_len;
}
@@ -759,7 +759,7 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
VirtIODevice *vdev = VIRTIO_DEVICE(s);
- if (virtio_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
+ if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
dev->type != TYPE_ROM) {
virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
sense.asc | (sense.ascq << 8));
@@ -783,7 +783,7 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
aio_context_release(s->ctx);
}
- if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
virtio_scsi_push_event(s, sd,
VIRTIO_SCSI_T_TRANSPORT_RESET,
VIRTIO_SCSI_EVT_RESET_RESCAN);
@@ -797,7 +797,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
SCSIDevice *sd = SCSI_DEVICE(dev);
- if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
virtio_scsi_push_event(s, sd,
VIRTIO_SCSI_T_TRANSPORT_RESET,
VIRTIO_SCSI_EVT_RESET_REMOVED);


@@ -105,7 +105,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
/* Disable guest->host notifies */
void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
{
- if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
}
}
@@ -116,7 +116,7 @@ void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
*/
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
{
- if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_avail_event(&vring->vr) = vring->vr.avail->idx;
} else {
vring_clear_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
@@ -135,12 +135,12 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
* interrupts. */
smp_mb();
- if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(!vring_more_avail(vdev, vring))) {
return true;
}
- if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
return !(vring_get_avail_flags(vdev, vring) &
VRING_AVAIL_F_NO_INTERRUPT);
}
@@ -402,7 +402,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* On success, increment avail index. */
vring->last_avail_idx++;
- if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_avail_event(&vring->vr) =
virtio_tswap16(vdev, vring->last_avail_idx);
}


@@ -742,7 +742,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
return -errno;
}
- if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
virtio_legacy_is_cross_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
virtio_is_big_endian(vdev),
@@ -839,7 +839,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
/* In the cross-endian case, we need to reset the vring endianness to
* native as legacy devices expect so by default.
*/
- if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
virtio_legacy_is_cross_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
!virtio_is_big_endian(vdev),


@@ -70,7 +70,7 @@ static inline void reset_stats(VirtIOBalloon *dev)
static bool balloon_stats_supported(const VirtIOBalloon *s)
{
VirtIODevice *vdev = VIRTIO_DEVICE(s);
- return virtio_has_feature(vdev, VIRTIO_BALLOON_F_STATS_VQ);
+ return virtio_vdev_has_feature(vdev, VIRTIO_BALLOON_F_STATS_VQ);
}
static bool balloon_stats_enabled(const VirtIOBalloon *s)


@@ -220,7 +220,7 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
vq->notification = enable;
- if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vring_avail_idx(vq));
} else if (enable) {
vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
@@ -471,7 +471,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
max = vq->vring.num;
i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
- if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vq->last_avail_idx);
}
@@ -560,7 +560,7 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
trace_virtio_set_status(vdev, val);
- if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
val & VIRTIO_CONFIG_S_FEATURES_OK) {
int ret = virtio_validate_features(vdev);
@@ -898,7 +898,7 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
/* virtio-1 compliant devices cannot change the alignment */
- if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
error_report("tried to modify queue alignment for virtio-1 device");
return;
}
@@ -993,12 +993,12 @@ static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
/* We need to expose used array entries before checking used event. */
smp_mb();
/* Always notify when queue is empty (when feature acknowledge) */
- if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
!vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
return true;
}
- if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
}
@@ -1035,7 +1035,7 @@ static bool virtio_device_endian_needed(void *opaque)
VirtIODevice *vdev = opaque;
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
- if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
return vdev->device_endian != virtio_default_endian();
}
/* Devices conforming to VIRTIO 1.0 or later are always LE. */


@@ -9,7 +9,6 @@
/* Reserve RAM space for tables: add another order of magnitude. */
#define ACPI_BUILD_TABLE_MAX_SIZE 0x200000
- #define ACPI_BUILD_APPNAME "Bochs"
#define ACPI_BUILD_APPNAME6 "BOCHS "
#define ACPI_BUILD_APPNAME4 "BXPC"


@@ -59,6 +59,7 @@ struct PCMachineClass {
MachineClass parent_class;
/*< public >*/
+ bool broken_reserved_end;
HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
DeviceState *dev);
};


@@ -19,7 +19,7 @@
static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
{
- if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
}


@@ -261,26 +261,27 @@ static inline void virtio_clear_feature(uint64_t *features, unsigned int fbit)
*features &= ~(1ULL << fbit);
}
- static inline bool __virtio_has_feature(uint64_t features, unsigned int fbit)
+ static inline bool virtio_has_feature(uint64_t features, unsigned int fbit)
{
assert(fbit < 64);
return !!(features & (1ULL << fbit));
}
- static inline bool virtio_has_feature(VirtIODevice *vdev, unsigned int fbit)
+ static inline bool virtio_vdev_has_feature(VirtIODevice *vdev,
+ unsigned int fbit)
{
- return __virtio_has_feature(vdev->guest_features, fbit);
+ return virtio_has_feature(vdev->guest_features, fbit);
}
static inline bool virtio_host_has_feature(VirtIODevice *vdev,
unsigned int fbit)
{
- return __virtio_has_feature(vdev->host_features, fbit);
+ return virtio_has_feature(vdev->host_features, fbit);
}
static inline bool virtio_is_big_endian(VirtIODevice *vdev)
{
- if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
}
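
(Illustration, not part of the patch.) The hunk above is the core of the helper rename: the bitmap check drops its reserved __ prefix and becomes virtio_has_feature(), while the device check against the negotiated guest features becomes virtio_vdev_has_feature(). A self-contained mock of the two, with simplified stand-in types, showing why the distinction matters:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VIRTIO_F_VERSION_1 32   /* same bit number as the real definition */

    typedef struct {
        uint64_t guest_features;    /* what the guest actually acknowledged */
        uint64_t host_features;     /* what the device offered */
    } VirtIODeviceMock;

    /* Bitmap-based check (was __virtio_has_feature): used during negotiation,
     * when only a plain feature word is available. */
    static bool virtio_has_feature(uint64_t features, unsigned int fbit)
    {
        assert(fbit < 64);
        return !!(features & (1ULL << fbit));
    }

    /* Device-based check (was virtio_has_feature(vdev, ...)): used at runtime,
     * against the negotiated guest features. */
    static bool virtio_vdev_has_feature(const VirtIODeviceMock *vdev,
                                        unsigned int fbit)
    {
        return virtio_has_feature(vdev->guest_features, fbit);
    }

    int main(void)
    {
        VirtIODeviceMock vdev = {
            .host_features  = 1ULL << VIRTIO_F_VERSION_1,  /* offered ... */
            .guest_features = 0,                           /* ... not accepted */
        };

        printf("offered virtio 1.0:    %d\n",
               virtio_has_feature(vdev.host_features, VIRTIO_F_VERSION_1)); /* 1 */
        printf("negotiated virtio 1.0: %d\n",
               virtio_vdev_has_feature(&vdev, VIRTIO_F_VERSION_1));         /* 0 */
        return 0;
    }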


@@ -10,6 +10,7 @@
#include "hw/irq.h"
#include "qemu-common.h"
#include "qemu/typedefs.h"
/* xen-machine.c */
enum xen_mode {
@@ -38,8 +39,7 @@ qemu_irq *xen_interrupt_controller_init(void);
void xenstore_store_pv_console_info(int i, struct CharDriverState *chr);
#if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
- int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
- MemoryRegion **ram_memory);
+ int xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr);
void xen_modified_memory(ram_addr_t start, ram_addr_t length);


@@ -1303,8 +1303,8 @@ spapr_pci_lsi_set(const char *busname, int pin, uint32_t irq) "%s PIN%d IRQ %u"
spapr_pci_msi_retry(unsigned config_addr, unsigned req_num, unsigned max_irqs) "Guest device at %x asked %u, have only %u"
# hw/pci/pci.c
- pci_update_mappings_del(void *d, uint32_t bus, uint32_t func, uint32_t slot, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,%#"PRIx64"+%#"PRIx64
- pci_update_mappings_add(void *d, uint32_t bus, uint32_t func, uint32_t slot, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,%#"PRIx64"+%#"PRIx64
+ pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,%#"PRIx64"+%#"PRIx64
+ pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,%#"PRIx64"+%#"PRIx64
# hw/net/pcnet.c
pcnet_s_reset(void *s) "s=%p"
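
(Illustration, not part of the patch, hypothetical values.) The trace format "%02x:%02x.%x" prints the usual bus:slot.function notation, so slot must precede function both in the prototypes above and at the pci.c call sites earlier in this diff:

    #include <stdio.h>

    int main(void)
    {
        unsigned bus = 0x00, slot = 0x03, func = 0x0;   /* hypothetical device */

        /* New order (bus, slot, func): prints the expected "00:03.0". */
        printf("%02x:%02x.%x\n", bus, slot, func);

        /* Old order (bus, func, slot) would have printed "00:00.3",
         * i.e. slot and function swapped in the trace output. */
        printf("%02x:%02x.%x\n", bus, func, slot);
        return 0;
    }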


@@ -47,8 +47,7 @@ void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
}
- int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
- MemoryRegion **ram_memory)
+ int xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
return 0;
}


@@ -180,8 +180,7 @@ qemu_irq *xen_interrupt_controller_init(void)
/* Memory Ops */
- static void xen_ram_init(ram_addr_t *below_4g_mem_size,
- ram_addr_t *above_4g_mem_size,
+ static void xen_ram_init(PCMachineState *pcms,
ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
MemoryRegion *sysmem = get_system_memory();
@@ -198,20 +197,20 @@ static void xen_ram_init(ram_addr_t *below_4g_mem_size,
}
if (ram_size >= user_lowmem) {
- *above_4g_mem_size = ram_size - user_lowmem;
- *below_4g_mem_size = user_lowmem;
+ pcms->above_4g_mem_size = ram_size - user_lowmem;
+ pcms->below_4g_mem_size = user_lowmem;
} else {
- *above_4g_mem_size = 0;
- *below_4g_mem_size = ram_size;
+ pcms->above_4g_mem_size = 0;
+ pcms->below_4g_mem_size = ram_size;
}
- if (!*above_4g_mem_size) {
+ if (!pcms->above_4g_mem_size) {
block_len = ram_size;
} else {
/*
* Xen does not allocate the memory continuously, it keeps a
* hole of the size computed above or passed in.
*/
- block_len = (1ULL << 32) + *above_4g_mem_size;
+ block_len = (1ULL << 32) + pcms->above_4g_mem_size;
}
memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
&error_abort);
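
(Illustration, not part of the patch, hypothetical sizes.) The same split as above, done on plain numbers — 6 GiB of RAM and a 3.5 GiB low-memory limit — now lands in pcms->below_4g_mem_size / pcms->above_4g_mem_size instead of the removed out-parameters, and block_len covers a full 4 GiB plus the above-4G part because Xen keeps the below-4G hole inside the RAM block:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GIB (1ULL << 30)

    int main(void)
    {
        /* Hypothetical inputs: user_lowmem as derived from the PCI hole,
         * ram_size as given on the command line. */
        uint64_t user_lowmem = 3584ULL << 20;   /* 3.5 GiB */
        uint64_t ram_size = 6 * GIB;

        uint64_t below_4g, above_4g, block_len;

        if (ram_size >= user_lowmem) {
            above_4g = ram_size - user_lowmem;
            below_4g = user_lowmem;
        } else {
            above_4g = 0;
            below_4g = ram_size;
        }

        /* The RAM block spans 4 GiB plus whatever sits above 4 GiB,
         * leaving the below-4G hole unpopulated. */
        block_len = above_4g ? (1ULL << 32) + above_4g : ram_size;

        printf("below_4g  = 0x%" PRIx64 "\n", below_4g);   /* 0xe0000000  */
        printf("above_4g  = 0x%" PRIx64 "\n", above_4g);   /* 0xa0000000  */
        printf("block_len = 0x%" PRIx64 "\n", block_len);  /* 0x1a0000000 */
        return 0;
    }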
@@ -229,12 +228,12 @@ static void xen_ram_init(ram_addr_t *below_4g_mem_size,
*/
memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
&ram_memory, 0xc0000,
- *below_4g_mem_size - 0xc0000);
+ pcms->below_4g_mem_size - 0xc0000);
memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
- if (*above_4g_mem_size > 0) {
+ if (pcms->above_4g_mem_size > 0) {
memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
&ram_memory, 0x100000000ULL,
- *above_4g_mem_size);
+ pcms->above_4g_mem_size);
memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
}
}
@@ -1159,7 +1158,7 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
}
/* return 0 means OK, or -1 means critical issue -- will exit(1) */
- int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
+ int xen_hvm_init(PCMachineState *pcms,
MemoryRegion **ram_memory)
{
int i, rc;
@@ -1270,7 +1269,7 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
/* Init RAM management */
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
- xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory);
+ xen_ram_init(pcms, ram_size, ram_memory);
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);