2007-11-17 18:14:51 +01:00
|
|
|
#ifndef QEMU_PCI_H
|
|
|
|
#define QEMU_PCI_H
|
|
|
|
|
2012-12-12 22:05:42 +01:00
|
|
|
#include "hw/qdev.h"
|
2012-12-17 18:19:49 +01:00
|
|
|
#include "exec/memory.h"
|
2012-12-17 18:20:04 +01:00
|
|
|
#include "sysemu/dma.h"
|
2009-05-14 23:35:07 +02:00
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
/* PCI includes legacy ISA access. */
|
2013-02-05 17:06:20 +01:00
|
|
|
#include "hw/isa/isa.h"
|
2007-11-17 18:14:51 +01:00
|
|
|
|
2012-12-12 22:05:42 +01:00
|
|
|
#include "hw/pci/pcie.h"
|
2010-10-19 11:06:34 +02:00
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
/* PCI bus */
|
|
|
|
|
2009-02-11 16:19:46 +01:00
|
|
|
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
|
|
|
|
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
|
|
|
|
#define PCI_FUNC(devfn) ((devfn) & 0x07)
|
2016-05-17 13:26:10 +02:00
|
|
|
/* Compose a 16-bit Bus/Device/Function identifier: bus number in bits
 * 15:8, devfn (slot << 3 | function) in bits 7:0.  Both arguments are
 * parenthesized so that expressions such as PCI_BUILD_BDF(b + 1, f)
 * expand correctly. */
#define PCI_BUILD_BDF(bus, devfn)     (((bus) << 8) | (devfn))
|
2011-01-27 07:56:35 +01:00
|
|
|
#define PCI_SLOT_MAX 32
|
2010-06-23 09:15:26 +02:00
|
|
|
#define PCI_FUNC_MAX 8
|
2009-02-11 16:19:46 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* Class, Vendor and Device IDs from Linux's pci_ids.h */
|
2012-12-12 22:05:42 +01:00
|
|
|
#include "hw/pci/pci_ids.h"
|
2009-02-01 20:26:20 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* QEMU-specific Vendor and Device ID definitions */
|
2009-02-11 16:21:54 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* IBM (0x1014) */
|
|
|
|
#define PCI_DEVICE_ID_IBM_440GX 0x027f
|
2009-02-01 13:01:04 +01:00
|
|
|
#define PCI_DEVICE_ID_IBM_OPENPIC2 0xffff
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* Hitachi (0x1054) */
|
2009-01-26 16:37:35 +01:00
|
|
|
#define PCI_VENDOR_ID_HITACHI 0x1054
|
2009-03-13 16:02:23 +01:00
|
|
|
#define PCI_DEVICE_ID_HITACHI_SH7751R 0x350e
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* Apple (0x106b) */
|
2009-02-01 13:01:04 +01:00
|
|
|
#define PCI_DEVICE_ID_APPLE_343S1201 0x0010
|
|
|
|
#define PCI_DEVICE_ID_APPLE_UNI_N_I_PCI 0x001e
|
|
|
|
#define PCI_DEVICE_ID_APPLE_UNI_N_PCI 0x001f
|
|
|
|
#define PCI_DEVICE_ID_APPLE_UNI_N_KEYL 0x0022
|
2009-03-13 16:02:23 +01:00
|
|
|
#define PCI_DEVICE_ID_APPLE_IPID_USB 0x003f
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* Realtek (0x10ec) */
|
|
|
|
#define PCI_DEVICE_ID_REALTEK_8029 0x8029
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* Xilinx (0x10ee) */
|
|
|
|
#define PCI_DEVICE_ID_XILINX_XC2VP30 0x0300
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* Marvell (0x11ab) */
|
|
|
|
#define PCI_DEVICE_ID_MARVELL_GT6412X 0x4620
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* QEMU/Bochs VGA (0x1234) */
|
2009-02-01 13:01:04 +01:00
|
|
|
#define PCI_VENDOR_ID_QEMU 0x1234
|
|
|
|
#define PCI_DEVICE_ID_QEMU_VGA 0x1111
|
|
|
|
|
2009-03-13 16:02:23 +01:00
|
|
|
/* VMWare (0x15ad) */
|
2009-01-26 16:37:35 +01:00
|
|
|
#define PCI_VENDOR_ID_VMWARE 0x15ad
|
|
|
|
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
|
|
|
|
#define PCI_DEVICE_ID_VMWARE_SVGA 0x0710
|
|
|
|
#define PCI_DEVICE_ID_VMWARE_NET 0x0720
|
|
|
|
#define PCI_DEVICE_ID_VMWARE_SCSI 0x0730
|
2013-04-19 09:05:46 +02:00
|
|
|
#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
|
2009-01-26 16:37:35 +01:00
|
|
|
#define PCI_DEVICE_ID_VMWARE_IDE 0x1729
|
2013-03-09 10:21:06 +01:00
|
|
|
#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
|
2009-01-26 16:37:35 +01:00
|
|
|
|
2009-03-28 18:29:07 +01:00
|
|
|
/* Intel (0x8086) */
|
2009-03-13 16:02:23 +01:00
|
|
|
#define PCI_DEVICE_ID_INTEL_82551IT 0x1209
|
2009-09-01 22:16:10 +02:00
|
|
|
#define PCI_DEVICE_ID_INTEL_82557 0x1229
|
2010-12-14 01:34:39 +01:00
|
|
|
#define PCI_DEVICE_ID_INTEL_82801IR 0x2922
|
2009-03-02 17:42:23 +01:00
|
|
|
|
2009-01-26 16:37:35 +01:00
|
|
|
/* Red Hat / Qumranet (for QEMU) -- see pci-ids.txt */
|
2008-12-11 22:15:42 +01:00
|
|
|
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
|
|
|
|
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
|
|
|
|
#define PCI_SUBDEVICE_ID_QEMU 0x1100
|
|
|
|
|
|
|
|
#define PCI_DEVICE_ID_VIRTIO_NET 0x1000
|
|
|
|
#define PCI_DEVICE_ID_VIRTIO_BLOCK 0x1001
|
|
|
|
#define PCI_DEVICE_ID_VIRTIO_BALLOON 0x1002
|
2009-01-26 16:22:46 +01:00
|
|
|
#define PCI_DEVICE_ID_VIRTIO_CONSOLE 0x1003
|
2011-02-11 09:40:59 +01:00
|
|
|
#define PCI_DEVICE_ID_VIRTIO_SCSI 0x1004
|
2012-06-20 08:59:32 +02:00
|
|
|
#define PCI_DEVICE_ID_VIRTIO_RNG 0x1005
|
2012-12-13 10:19:36 +01:00
|
|
|
#define PCI_DEVICE_ID_VIRTIO_9P 0x1009
|
2008-12-11 22:15:42 +01:00
|
|
|
|
2012-12-13 10:19:38 +01:00
|
|
|
#define PCI_VENDOR_ID_REDHAT 0x1b36
|
|
|
|
#define PCI_DEVICE_ID_REDHAT_BRIDGE 0x0001
|
|
|
|
#define PCI_DEVICE_ID_REDHAT_SERIAL 0x0002
|
|
|
|
#define PCI_DEVICE_ID_REDHAT_SERIAL2 0x0003
|
|
|
|
#define PCI_DEVICE_ID_REDHAT_SERIAL4 0x0004
|
2013-03-31 14:31:14 +02:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_TEST 0x0005
|
2015-03-14 05:09:28 +01:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006
|
2014-12-30 06:14:02 +01:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_SDHCI 0x0007
|
2015-02-13 06:46:07 +01:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_PCIE_HOST 0x0008
|
2015-06-02 13:23:06 +02:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_PXB 0x0009
|
2015-06-18 12:17:29 +02:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_BRIDGE_SEAT 0x000a
|
2015-11-26 17:00:27 +01:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_PXB_PCIE 0x000b
|
2012-12-13 10:19:38 +01:00
|
|
|
#define PCI_DEVICE_ID_REDHAT_QXL 0x0100
|
|
|
|
|
2009-10-30 13:21:10 +01:00
|
|
|
#define FMT_PCIBUS PRIx64
|
2009-10-30 13:21:08 +01:00
|
|
|
|
2016-03-09 13:44:19 +01:00
|
|
|
typedef uint64_t pcibus_t;
|
|
|
|
|
|
|
|
/* Fully qualified address of a PCI device on the host
 * (domain:bus:slot.function), e.g. for assigned/passthrough devices. */
struct PCIHostDeviceAddress {
    unsigned int domain;
    unsigned int bus;
    unsigned int slot;
    unsigned int function;
};
|
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
typedef void PCIConfigWriteFunc(PCIDevice *pci_dev,
|
|
|
|
uint32_t address, uint32_t data, int len);
|
|
|
|
typedef uint32_t PCIConfigReadFunc(PCIDevice *pci_dev,
|
|
|
|
uint32_t address, int len);
|
|
|
|
typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num,
|
2009-10-30 13:21:08 +01:00
|
|
|
pcibus_t addr, pcibus_t size, int type);
|
2012-07-04 06:39:27 +02:00
|
|
|
typedef void PCIUnregisterFunc(PCIDevice *pci_dev);
|
2007-11-17 18:14:51 +01:00
|
|
|
|
|
|
|
/* State of one PCI Base Address Register (BAR) of a device. */
typedef struct PCIIORegion {
    pcibus_t addr; /* current PCI mapping address. -1 means not mapped */
#define PCI_BAR_UNMAPPED (~(pcibus_t)0)
    pcibus_t size;
    /* BAR type bits as programmed into config space (I/O vs. memory,
     * 64-bit, prefetchable) — see the PCI_BASE_ADDRESS_* flags. */
    uint8_t type;
    /* Region supplied by the device for this BAR. */
    MemoryRegion *memory;
    /* Bus address space (memory or I/O) the BAR gets mapped into. */
    MemoryRegion *address_space;
} PCIIORegion;
|
|
|
|
|
|
|
|
#define PCI_ROM_SLOT 6
|
|
|
|
#define PCI_NUM_REGIONS 7
|
|
|
|
|
2013-03-03 18:21:26 +01:00
|
|
|
/* Indexes into PCIDevice::vga_regions for the legacy VGA ranges a
 * PCI VGA device may claim (see pci_register_vga()). */
enum {
    QEMU_PCI_VGA_MEM,       /* 0xa0000 frame buffer window */
    QEMU_PCI_VGA_IO_LO,     /* 0x3b0 I/O range */
    QEMU_PCI_VGA_IO_HI,     /* 0x3c0 I/O range */
    QEMU_PCI_VGA_NUM_REGIONS,   /* array size, not a real region */
};
|
|
|
|
|
|
|
|
#define QEMU_PCI_VGA_MEM_BASE 0xa0000
|
|
|
|
#define QEMU_PCI_VGA_MEM_SIZE 0x20000
|
|
|
|
#define QEMU_PCI_VGA_IO_LO_BASE 0x3b0
|
|
|
|
#define QEMU_PCI_VGA_IO_LO_SIZE 0xc
|
|
|
|
#define QEMU_PCI_VGA_IO_HI_BASE 0x3c0
|
|
|
|
#define QEMU_PCI_VGA_IO_HI_SIZE 0x20
|
|
|
|
|
2012-12-12 22:05:42 +01:00
|
|
|
#include "hw/pci/pci_regs.h"
|
2009-12-15 12:26:01 +01:00
|
|
|
|
|
|
|
/* PCI HEADER_TYPE */
|
2009-05-03 21:03:00 +02:00
|
|
|
#define PCI_HEADER_TYPE_MULTI_FUNCTION 0x80
|
2008-12-18 23:43:33 +01:00
|
|
|
|
2009-06-21 18:45:18 +02:00
|
|
|
/* Size of the standard PCI config header */
|
|
|
|
#define PCI_CONFIG_HEADER_SIZE 0x40
|
|
|
|
/* Size of the standard PCI config space */
|
|
|
|
#define PCI_CONFIG_SPACE_SIZE 0x100
|
2015-03-10 02:52:23 +01:00
|
|
|
/* Size of the standard PCIe config space: 4KB */
|
2009-10-30 13:21:18 +01:00
|
|
|
#define PCIE_CONFIG_SPACE_SIZE 0x1000
|
2009-06-21 18:45:18 +02:00
|
|
|
|
2009-10-30 13:20:56 +01:00
|
|
|
#define PCI_NUM_PINS 4 /* A-D */
|
|
|
|
|
2009-06-21 18:49:54 +02:00
|
|
|
/* Bits in cap_present field. */
|
|
|
|
enum {
    QEMU_PCI_CAP_MSI = 0x1,     /* device exposes an MSI capability */
    QEMU_PCI_CAP_MSIX = 0x2,    /* device exposes an MSI-X capability */
    QEMU_PCI_CAP_EXPRESS = 0x4, /* device is PCI Express (see pci_is_express) */

    /* multifunction capable device */
#define QEMU_PCI_CAP_MULTIFUNCTION_BITNR        3
    QEMU_PCI_CAP_MULTIFUNCTION = (1 << QEMU_PCI_CAP_MULTIFUNCTION_BITNR),

    /* command register SERR bit enabled */
#define QEMU_PCI_CAP_SERR_BITNR 4
    QEMU_PCI_CAP_SERR = (1 << QEMU_PCI_CAP_SERR_BITNR),

    /* Standard hot plug controller. */
#define QEMU_PCI_SHPC_BITNR 5
    QEMU_PCI_CAP_SHPC = (1 << QEMU_PCI_SHPC_BITNR),

    /* Slot Identification capability. */
#define QEMU_PCI_SLOTID_BITNR 6
    QEMU_PCI_CAP_SLOTID = (1 << QEMU_PCI_SLOTID_BITNR),

    /* PCI Express capability - Power Controller Present */
#define QEMU_PCIE_SLTCAP_PCP_BITNR 7
    QEMU_PCIE_SLTCAP_PCP = (1 << QEMU_PCIE_SLTCAP_PCP_BITNR),
};
|
|
|
|
|
2011-12-04 19:22:06 +01:00
|
|
|
#define TYPE_PCI_DEVICE "pci-device"
|
|
|
|
#define PCI_DEVICE(obj) \
|
|
|
|
OBJECT_CHECK(PCIDevice, (obj), TYPE_PCI_DEVICE)
|
|
|
|
#define PCI_DEVICE_CLASS(klass) \
|
|
|
|
OBJECT_CLASS_CHECK(PCIDeviceClass, (klass), TYPE_PCI_DEVICE)
|
|
|
|
#define PCI_DEVICE_GET_CLASS(obj) \
|
|
|
|
OBJECT_GET_CLASS(PCIDeviceClass, (obj), TYPE_PCI_DEVICE)
|
|
|
|
|
2012-07-19 16:11:47 +02:00
|
|
|
/* Resolved routing of a PCI INTx pin to a system IRQ, as returned by
 * pci_device_route_intx_to_irq(). */
typedef struct PCIINTxRoute {
    enum {
        PCI_INTX_ENABLED,
        PCI_INTX_INVERTED,  /* routed with inverted polarity */
        PCI_INTX_DISABLED,  /* pin is not routed to any IRQ */
    } mode;
    int irq;    /* target IRQ number; not meaningful when disabled */
} PCIINTxRoute;
|
|
|
|
|
2011-12-04 19:22:06 +01:00
|
|
|
/* Class structure for TYPE_PCI_DEVICE.  Concrete device models fill in
 * the config-space identifiers below and one of realize()/init(). */
typedef struct PCIDeviceClass {
    DeviceClass parent_class;

    void (*realize)(PCIDevice *dev, Error **errp);
    int (*init)(PCIDevice *dev);/* TODO convert to realize() and remove */
    PCIUnregisterFunc *exit;
    /* Optional overrides for config-space accesses; NULL presumably
     * falls back to pci_default_read/write_config — confirm in pci.c. */
    PCIConfigReadFunc *config_read;
    PCIConfigWriteFunc *config_write;

    /* Identifiers placed in the device's config-space header. */
    uint16_t vendor_id;
    uint16_t device_id;
    uint8_t revision;
    uint16_t class_id;
    uint16_t subsystem_vendor_id;       /* only for header type = 0 */
    uint16_t subsystem_id;              /* only for header type = 0 */

    /*
     * pci-to-pci bridge or normal device.
     * This doesn't mean pci host switch.
     * When card bus bridge is supported, this would be enhanced.
     */
    int is_bridge;

    /* pcie stuff */
    int is_express; /* is this device pci express? */

    /* rom bar */
    const char *romfile;    /* default option ROM image for this model */
} PCIDeviceClass;
|
|
|
|
|
2012-07-02 14:38:47 +02:00
|
|
|
typedef void (*PCIINTxRoutingNotifier)(PCIDevice *dev);
|
2012-05-17 15:32:31 +02:00
|
|
|
typedef int (*MSIVectorUseNotifier)(PCIDevice *dev, unsigned int vector,
|
|
|
|
MSIMessage msg);
|
|
|
|
typedef void (*MSIVectorReleaseNotifier)(PCIDevice *dev, unsigned int vector);
|
2012-12-12 15:10:02 +01:00
|
|
|
typedef void (*MSIVectorPollNotifier)(PCIDevice *dev,
|
|
|
|
unsigned int vector_start,
|
|
|
|
unsigned int vector_end);
|
2012-05-17 15:32:31 +02:00
|
|
|
|
2016-05-17 13:26:10 +02:00
|
|
|
/* How a device's PCI requester ID is derived (see pci_requester_id()). */
enum PCIReqIDType {
    PCI_REQ_ID_INVALID = 0,
    PCI_REQ_ID_BDF,             /* use the device's own bus/devfn */
    PCI_REQ_ID_SECONDARY_BUS,   /* derived from a bridge's secondary bus */
    PCI_REQ_ID_MAX,
};
typedef enum PCIReqIDType PCIReqIDType;

/* Cached requester-ID lookup result, stored in
 * PCIDevice::requester_id_cache to avoid re-walking the PCI tree. */
struct PCIReqIDCache {
    PCIDevice *dev;     /* device the requester ID is fetched from */
    PCIReqIDType type;  /* how to turn *dev into a requester ID */
};
typedef struct PCIReqIDCache PCIReqIDCache;
|
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
/* Per-instance state of every PCI device model.  Embeds the qdev
 * DeviceState as its first member so a PCIDevice can be used wherever
 * a DeviceState is expected.  Field order matters (VMState/ABI) —
 * do not reorder. */
struct PCIDevice {
    DeviceState qdev;

    /* PCI config space */
    uint8_t *config;

    /* Used to enable config checks on load. Note that writable bits are
     * never checked even if set in cmask. */
    uint8_t *cmask;

    /* Used to implement R/W bytes */
    uint8_t *wmask;

    /* Used to implement RW1C(Write 1 to Clear) bytes */
    uint8_t *w1cmask;

    /* Used to allocate config space for capabilities. */
    uint8_t *used;

    /* the following fields are read only */
    PCIBus *bus;
    int32_t devfn;
    /* Cached device to fetch requester ID from, to avoid the PCI
     * tree walking every time we invoke PCI request (e.g.,
     * MSI). For conventional PCI root complex, this field is
     * meaningless. */
    PCIReqIDCache requester_id_cache;
    char name[64];
    PCIIORegion io_regions[PCI_NUM_REGIONS];
    /* Address space seen by this device when it masters DMA. */
    AddressSpace bus_master_as;
    /* Gates DMA on the Bus Master bit of the command register. */
    MemoryRegion bus_master_enable_region;

    /* do not access the following fields */
    PCIConfigReadFunc *config_read;
    PCIConfigWriteFunc *config_write;

    /* Legacy PCI VGA regions */
    MemoryRegion *vga_regions[QEMU_PCI_VGA_NUM_REGIONS];
    bool has_vga;

    /* Current IRQ levels. Used internally by the generic PCI code. */
    uint8_t irq_state;

    /* Capability bits */
    uint32_t cap_present;

    /* Offset of MSI-X capability in config space */
    uint8_t msix_cap;

    /* MSI-X entries */
    int msix_entries_nr;

    /* Space to store MSIX table & pending bit array */
    uint8_t *msix_table;
    uint8_t *msix_pba;
    /* MemoryRegion container for msix exclusive BAR setup */
    MemoryRegion msix_exclusive_bar;
    /* Memory Regions for MSIX table and pending bit entries. */
    MemoryRegion msix_table_mmio;
    MemoryRegion msix_pba_mmio;
    /* Reference-count for entries actually in use by driver. */
    unsigned *msix_entry_used;
    /* MSIX function mask set or MSIX disabled */
    bool msix_function_masked;
    /* Version id needed for VMState */
    int32_t version_id;

    /* Offset of MSI capability in config space */
    uint8_t msi_cap;

    /* PCI Express */
    PCIExpressDevice exp;

    /* SHPC */
    SHPCDevice *shpc;

    /* Location of option rom */
    char *romfile;
    bool has_rom;
    MemoryRegion rom;
    uint32_t rom_bar;   /* presumably 0 disables the ROM BAR — confirm */

    /* INTx routing notifier */
    PCIINTxRoutingNotifier intx_routing_notifier;

    /* MSI-X notifiers */
    MSIVectorUseNotifier msix_vector_use_notifier;
    MSIVectorReleaseNotifier msix_vector_release_notifier;
    MSIVectorPollNotifier msix_vector_poll_notifier;
};
|
|
|
|
|
2011-08-08 15:09:31 +02:00
|
|
|
void pci_register_bar(PCIDevice *pci_dev, int region_num,
|
|
|
|
uint8_t attr, MemoryRegion *memory);
|
2013-03-03 18:21:26 +01:00
|
|
|
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
|
|
|
|
MemoryRegion *io_lo, MemoryRegion *io_hi);
|
|
|
|
void pci_unregister_vga(PCIDevice *pci_dev);
|
2011-08-08 15:08:55 +02:00
|
|
|
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num);
|
2007-11-17 18:14:51 +01:00
|
|
|
|
2010-09-06 09:46:16 +02:00
|
|
|
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
|
|
|
|
uint8_t offset, uint8_t size);
|
2014-04-10 10:24:36 +02:00
|
|
|
int pci_add_capability2(PCIDevice *pdev, uint8_t cap_id,
|
|
|
|
uint8_t offset, uint8_t size,
|
|
|
|
Error **errp);
|
2009-06-21 18:45:40 +02:00
|
|
|
|
|
|
|
void pci_del_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size);
|
|
|
|
|
|
|
|
uint8_t pci_find_capability(PCIDevice *pci_dev, uint8_t cap_id);
|
|
|
|
|
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
uint32_t pci_default_read_config(PCIDevice *d,
|
|
|
|
uint32_t address, int len);
|
|
|
|
void pci_default_write_config(PCIDevice *d,
|
|
|
|
uint32_t address, uint32_t val, int len);
|
|
|
|
void pci_device_save(PCIDevice *s, QEMUFile *f);
|
|
|
|
int pci_device_load(PCIDevice *s, QEMUFile *f);
|
2011-08-15 16:17:36 +02:00
|
|
|
MemoryRegion *pci_address_space(PCIDevice *dev);
|
2011-08-11 00:28:10 +02:00
|
|
|
MemoryRegion *pci_address_space_io(PCIDevice *dev);
|
2007-11-17 18:14:51 +01:00
|
|
|
|
2015-05-07 07:33:54 +02:00
|
|
|
/*
|
|
|
|
* Should not normally be used by devices. For use by sPAPR target
|
|
|
|
* where QEMU emulates firmware.
|
|
|
|
*/
|
|
|
|
int pci_bar(PCIDevice *d, int reg);
|
|
|
|
|
2009-08-28 15:28:17 +02:00
|
|
|
typedef void (*pci_set_irq_fn)(void *opaque, int irq_num, int level);
|
2007-11-17 18:14:51 +01:00
|
|
|
typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num);
|
2012-07-19 16:11:47 +02:00
|
|
|
typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin);
|
2010-11-12 08:21:35 +01:00
|
|
|
|
2013-03-14 23:01:05 +01:00
|
|
|
#define TYPE_PCI_BUS "PCI"
|
|
|
|
#define PCI_BUS(obj) OBJECT_CHECK(PCIBus, (obj), TYPE_PCI_BUS)
|
2015-06-02 13:22:57 +02:00
|
|
|
#define PCI_BUS_CLASS(klass) OBJECT_CLASS_CHECK(PCIBusClass, (klass), TYPE_PCI_BUS)
|
|
|
|
#define PCI_BUS_GET_CLASS(obj) OBJECT_GET_CLASS(PCIBusClass, (obj), TYPE_PCI_BUS)
|
2013-03-14 23:01:05 +01:00
|
|
|
#define TYPE_PCIE_BUS "PCIE"
|
|
|
|
|
2013-03-14 23:01:23 +01:00
|
|
|
bool pci_bus_is_express(PCIBus *bus);
|
2013-03-14 23:01:29 +01:00
|
|
|
bool pci_bus_is_root(PCIBus *bus);
|
2013-08-23 20:23:55 +02:00
|
|
|
void pci_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent,
|
2011-07-26 13:26:19 +02:00
|
|
|
const char *name,
|
2011-08-08 15:09:04 +02:00
|
|
|
MemoryRegion *address_space_mem,
|
|
|
|
MemoryRegion *address_space_io,
|
2013-03-14 23:01:11 +01:00
|
|
|
uint8_t devfn_min, const char *typename);
|
2011-07-26 13:26:19 +02:00
|
|
|
PCIBus *pci_bus_new(DeviceState *parent, const char *name,
|
2011-08-08 15:09:04 +02:00
|
|
|
MemoryRegion *address_space_mem,
|
|
|
|
MemoryRegion *address_space_io,
|
2013-03-14 23:01:11 +01:00
|
|
|
uint8_t devfn_min, const char *typename);
|
2009-09-16 22:25:31 +02:00
|
|
|
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
|
|
|
void *irq_opaque, int nirq);
|
2011-04-01 13:43:21 +02:00
|
|
|
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
|
2012-10-19 22:43:28 +02:00
|
|
|
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
|
|
|
|
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
|
2009-05-23 01:05:19 +02:00
|
|
|
PCIBus *pci_register_bus(DeviceState *parent, const char *name,
|
|
|
|
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
2011-07-26 13:26:19 +02:00
|
|
|
void *irq_opaque,
|
2011-08-08 15:09:04 +02:00
|
|
|
MemoryRegion *address_space_mem,
|
|
|
|
MemoryRegion *address_space_io,
|
2013-03-14 23:01:11 +01:00
|
|
|
uint8_t devfn_min, int nirq, const char *typename);
|
2012-07-19 16:11:47 +02:00
|
|
|
void pci_bus_set_route_irq_fn(PCIBus *, pci_route_irq_fn);
|
|
|
|
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin);
|
2012-10-02 21:21:54 +02:00
|
|
|
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new);
|
2012-07-02 14:38:47 +02:00
|
|
|
void pci_bus_fire_intx_routing_notifier(PCIBus *bus);
|
|
|
|
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
|
|
|
|
PCIINTxRoutingNotifier notifier);
|
2010-12-22 07:14:35 +01:00
|
|
|
void pci_device_reset(PCIDevice *dev);
|
2007-11-17 18:14:51 +01:00
|
|
|
|
2013-06-06 10:48:51 +02:00
|
|
|
PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
|
|
|
|
const char *default_model,
|
2009-09-25 03:53:51 +02:00
|
|
|
const char *default_devaddr);
|
2012-09-08 11:49:24 +02:00
|
|
|
|
|
|
|
PCIDevice *pci_vga_init(PCIBus *bus);
|
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
int pci_bus_num(PCIBus *s);
|
2015-06-02 13:23:09 +02:00
|
|
|
int pci_bus_numa_node(PCIBus *bus);
|
2012-06-21 17:35:28 +02:00
|
|
|
void pci_for_each_device(PCIBus *bus, int bus_num,
|
|
|
|
void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
|
|
|
|
void *opaque);
|
2013-10-14 17:01:07 +02:00
|
|
|
void pci_for_each_bus_depth_first(PCIBus *bus,
|
|
|
|
void *(*begin)(PCIBus *bus, void *parent_state),
|
|
|
|
void (*end)(PCIBus *bus, void *state),
|
|
|
|
void *parent_state);
|
2015-10-28 07:20:31 +01:00
|
|
|
PCIDevice *pci_get_function_0(PCIDevice *pci_dev);
|
2013-10-14 17:01:07 +02:00
|
|
|
|
|
|
|
/* Use this wrapper when specific scan order is not required. */
static inline
void pci_for_each_bus(PCIBus *bus,
                      void (*fn)(PCIBus *bus, void *opaque),
                      void *opaque)
{
    /* fn is installed as the walker's post-order ("end") callback with
     * opaque as its state; no pre-order hook is used. */
    pci_for_each_bus_depth_first(bus, NULL, fn, opaque);
}
|
|
|
|
|
2013-06-06 10:48:47 +02:00
|
|
|
PCIBus *pci_find_primary_bus(void);
|
2013-06-06 10:48:48 +02:00
|
|
|
PCIBus *pci_device_root_bus(const PCIDevice *d);
|
2013-06-06 10:48:49 +02:00
|
|
|
const char *pci_root_bus_path(PCIDevice *dev);
|
2011-01-27 07:56:36 +01:00
|
|
|
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn);
|
2010-12-24 04:14:13 +01:00
|
|
|
int pci_qdev_find_device(const char *id, PCIDevice **pdev);
|
2013-09-02 10:37:02 +02:00
|
|
|
void pci_bus_get_w64_range(PCIBus *bus, Range *range);
|
2007-11-17 18:14:51 +01:00
|
|
|
|
2011-01-20 08:21:38 +01:00
|
|
|
void pci_device_deassert_intx(PCIDevice *dev);
|
|
|
|
|
2012-10-30 12:47:48 +01:00
|
|
|
typedef AddressSpace *(*PCIIOMMUFunc)(PCIBus *, void *, int);
|
2012-06-27 06:50:45 +02:00
|
|
|
|
2013-08-09 17:09:08 +02:00
|
|
|
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev);
|
2012-10-30 12:47:48 +01:00
|
|
|
void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque);
|
2012-06-27 06:50:45 +02:00
|
|
|
|
2009-06-21 18:50:57 +02:00
|
|
|
/* Store an 8-bit value into a config-space field. */
static inline void
pci_set_byte(uint8_t *config, uint8_t val)
{
    config[0] = val;
}
|
|
|
|
|
|
|
|
/* Load an 8-bit value from a config-space field. */
static inline uint8_t
pci_get_byte(const uint8_t *config)
{
    return config[0];
}
|
|
|
|
|
2009-06-21 18:45:30 +02:00
|
|
|
/* Store a 16-bit config-space value.  PCI config space is always
 * little-endian, hence the explicit *_le_p accessor. */
static inline void
pci_set_word(uint8_t *config, uint16_t val)
{
    stw_le_p(config, val);
}

/* Load a 16-bit little-endian config-space value. */
static inline uint16_t
pci_get_word(const uint8_t *config)
{
    return lduw_le_p(config);
}
|
|
|
|
|
|
|
|
/* Store a 32-bit config-space value (little-endian, like all of PCI
 * config space). */
static inline void
pci_set_long(uint8_t *config, uint32_t val)
{
    stl_le_p(config, val);
}

/* Load a 32-bit little-endian config-space value. */
static inline uint32_t
pci_get_long(const uint8_t *config)
{
    return ldl_le_p(config);
}
|
|
|
|
|
2016-06-01 10:23:30 +02:00
|
|
|
/*
 * PCI capabilities and/or their fields
 * are generally DWORD aligned only so
 * mechanism used by pci_set/get_quad()
 * must be tolerant to unaligned pointers
 *
 */
/* Store a 64-bit little-endian config-space value. */
static inline void
pci_set_quad(uint8_t *config, uint64_t val)
{
    stq_le_p(config, val);
}

/* Load a 64-bit little-endian config-space value. */
static inline uint64_t
pci_get_quad(const uint8_t *config)
{
    return ldq_le_p(config);
}
|
|
|
|
|
2009-01-26 16:37:35 +01:00
|
|
|
/* Convenience setters for the standard config-space header fields,
 * using the architected offsets (PCI_VENDOR_ID etc.) from pci_regs.h. */

static inline void
pci_config_set_vendor_id(uint8_t *pci_config, uint16_t val)
{
    pci_set_word(&pci_config[PCI_VENDOR_ID], val);
}

static inline void
pci_config_set_device_id(uint8_t *pci_config, uint16_t val)
{
    pci_set_word(&pci_config[PCI_DEVICE_ID], val);
}

static inline void
pci_config_set_revision(uint8_t *pci_config, uint8_t val)
{
    pci_set_byte(&pci_config[PCI_REVISION_ID], val);
}

/* Sets the 16-bit class/subclass field (PCI_CLASS_DEVICE). */
static inline void
pci_config_set_class(uint8_t *pci_config, uint16_t val)
{
    pci_set_word(&pci_config[PCI_CLASS_DEVICE], val);
}

static inline void
pci_config_set_prog_interface(uint8_t *pci_config, uint8_t val)
{
    pci_set_byte(&pci_config[PCI_CLASS_PROG], val);
}

static inline void
pci_config_set_interrupt_pin(uint8_t *pci_config, uint8_t val)
{
    pci_set_byte(&pci_config[PCI_INTERRUPT_PIN], val);
}
|
|
|
|
|
2010-10-19 11:06:28 +02:00
|
|
|
/*
|
|
|
|
* helper functions to do bit mask operation on configuration space.
|
|
|
|
* Just to set bit, use test-and-set and discard returned value.
|
|
|
|
* Just to clear bit, use test-and-clear and discard returned value.
|
|
|
|
* NOTE: They aren't atomic.
|
|
|
|
*/
|
|
|
|
/* Clear the bits selected by mask in the byte at config and return
 * their previous value.  Not atomic. */
static inline uint8_t
pci_byte_test_and_clear_mask(uint8_t *config, uint8_t mask)
{
    uint8_t prev = pci_get_byte(config);

    pci_set_byte(config, prev & ~mask);
    return mask & prev;
}
|
|
|
|
|
|
|
|
/* Set the bits selected by mask in the byte at config and return
 * their previous value.  Not atomic. */
static inline uint8_t
pci_byte_test_and_set_mask(uint8_t *config, uint8_t mask)
{
    uint8_t prev = pci_get_byte(config);

    pci_set_byte(config, prev | mask);
    return mask & prev;
}
|
|
|
|
|
|
|
|
/* 16/32/64-bit counterparts of the byte test-and-clear/set helpers
 * above: operate on little-endian words in config space, return the
 * previously-set bits under mask.  None of these are atomic. */

static inline uint16_t
pci_word_test_and_clear_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);
    pci_set_word(config, val & ~mask);
    return val & mask;
}

static inline uint16_t
pci_word_test_and_set_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);
    pci_set_word(config, val | mask);
    return val & mask;
}

static inline uint32_t
pci_long_test_and_clear_mask(uint8_t *config, uint32_t mask)
{
    uint32_t val = pci_get_long(config);
    pci_set_long(config, val & ~mask);
    return val & mask;
}

static inline uint32_t
pci_long_test_and_set_mask(uint8_t *config, uint32_t mask)
{
    uint32_t val = pci_get_long(config);
    pci_set_long(config, val | mask);
    return val & mask;
}

static inline uint64_t
pci_quad_test_and_clear_mask(uint8_t *config, uint64_t mask)
{
    uint64_t val = pci_get_quad(config);
    pci_set_quad(config, val & ~mask);
    return val & mask;
}

static inline uint64_t
pci_quad_test_and_set_mask(uint8_t *config, uint64_t mask)
{
    uint64_t val = pci_get_quad(config);
    pci_set_quad(config, val | mask);
    return val & mask;
}
|
|
|
|
|
2012-02-21 14:41:30 +01:00
|
|
|
/* Access a register specified by a mask */
/* The setters shift 'reg' into the contiguous bit-field selected by
 * mask (ctz32(mask) gives the field's least-significant bit) and merge
 * it without disturbing bits outside the mask; the getters extract and
 * right-align the field.  mask must be non-zero. */

static inline void
pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg)
{
    uint8_t val = pci_get_byte(config);
    uint8_t rval = reg << ctz32(mask);
    pci_set_byte(config, (~mask & val) | (mask & rval));
}

static inline uint8_t
pci_get_byte_by_mask(uint8_t *config, uint8_t mask)
{
    uint8_t val = pci_get_byte(config);
    return (val & mask) >> ctz32(mask);
}

static inline void
pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg)
{
    uint16_t val = pci_get_word(config);
    uint16_t rval = reg << ctz32(mask);
    pci_set_word(config, (~mask & val) | (mask & rval));
}

static inline uint16_t
pci_get_word_by_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);
    return (val & mask) >> ctz32(mask);
}

static inline void
pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg)
{
    uint32_t val = pci_get_long(config);
    uint32_t rval = reg << ctz32(mask);
    pci_set_long(config, (~mask & val) | (mask & rval));
}

static inline uint32_t
pci_get_long_by_mask(uint8_t *config, uint32_t mask)
{
    uint32_t val = pci_get_long(config);
    return (val & mask) >> ctz32(mask);
}

static inline void
pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg)
{
    uint64_t val = pci_get_quad(config);
    uint64_t rval = reg << ctz32(mask);
    pci_set_quad(config, (~mask & val) | (mask & rval));
}

static inline uint64_t
pci_get_quad_by_mask(uint8_t *config, uint64_t mask)
{
    uint64_t val = pci_get_quad(config);
    return (val & mask) >> ctz32(mask);
}
|
|
|
|
|
2010-06-23 09:15:30 +02:00
|
|
|
PCIDevice *pci_create_multifunction(PCIBus *bus, int devfn, bool multifunction,
|
|
|
|
const char *name);
|
|
|
|
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
|
|
|
|
bool multifunction,
|
|
|
|
const char *name);
|
2009-09-25 03:53:53 +02:00
|
|
|
PCIDevice *pci_create(PCIBus *bus, int devfn, const char *name);
|
2009-05-14 23:35:07 +02:00
|
|
|
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name);
|
|
|
|
|
2013-10-07 09:36:35 +02:00
|
|
|
qemu_irq pci_allocate_irq(PCIDevice *pci_dev);
|
|
|
|
void pci_set_irq(PCIDevice *pci_dev, int level);
|
|
|
|
|
|
|
|
/* Raise the device's interrupt line (level 1). */
static inline void pci_irq_assert(PCIDevice *pci_dev)
{
    pci_set_irq(pci_dev, 1);
}

/* Lower the device's interrupt line (level 0). */
static inline void pci_irq_deassert(PCIDevice *pci_dev)
{
    pci_set_irq(pci_dev, 0);
}
|
|
|
|
|
|
|
|
/*
 * Momentarily assert then deassert the device's interrupt.
 *
 * FIXME: PCI does not work this way.
 * All the callers to this method should be fixed.
 */
static inline void pci_irq_pulse(PCIDevice *pci_dev)
{
    pci_irq_assert(pci_dev);
    pci_irq_deassert(pci_dev);
}
|
|
|
|
|
2010-05-27 07:42:37 +02:00
|
|
|
/* Non-zero iff the device is PCI Express.  Note: returns the raw
 * QEMU_PCI_CAP_EXPRESS bit, not a normalized 0/1 — treat as boolean only. */
static inline int pci_is_express(const PCIDevice *d)
{
    return d->cap_present & QEMU_PCI_CAP_EXPRESS;
}
|
|
|
|
|
2010-05-27 07:42:37 +02:00
|
|
|
static inline uint32_t pci_config_size(const PCIDevice *d)
|
2009-10-30 13:21:18 +01:00
|
|
|
{
|
|
|
|
return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
|
|
|
|
}
|
|
|
|
|
2016-05-17 13:26:10 +02:00
|
|
|
/* Return the device's bus/device/function ID packed as bus << 8 | devfn
 * (see PCI_BUILD_BDF). */
static inline uint16_t pci_get_bdf(PCIDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}
|
|
|
|
|
2016-05-17 13:26:10 +02:00
|
|
|
/* BDF-format ID the device uses as DMA requester ID.  NOTE(review):
 * presumably differs from pci_get_bdf() for devices behind bridges —
 * confirm against the definition in pci.c. */
uint16_t pci_requester_id(PCIDevice *dev);
|
|
|
|
|
Add stub functions for PCI device models to do PCI DMA
This patch adds functions to pci.[ch] to perform PCI DMA operations.
At present, these are just stubs which perform directly cpu physical
memory accesses. Stubs are included which are analogous to
cpu_physical_memory_{read,write}(), the stX_phys() and ldX_phys()
functions and cpu_physical_memory_{map,unmap}().
In addition, a wrapper around qemu_sglist_init() is provided, which
also takes a PCIDevice *. It's assumed that _init() is the only
sglist function which will need wrapping, the idea being that once we
have IOMMU support whatever IOMMU context handle the wrapper derives
from the PCI device will be stored within the sglist structure for
later use.
Using these stubs, however, distinguishes PCI device DMA transactions from
other accesses to physical memory, which will allow PCI IOMMU support to
be added in one place, rather than updating every PCI driver at that time.
That is, it allows us to update individual PCI drivers to support an IOMMU
without having yet determined the details of how the IOMMU emulation will
operate. This will let us remove the most bitrot-sensitive part of an
IOMMU patch in advance.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2011-10-31 07:06:47 +01:00
|
|
|
/* DMA access functions */

/* The address space this device's bus-master DMA transactions go through;
 * all pci_dma_*() helpers below route their accesses via this space. */
static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
{
    return &dev->bus_master_as;
}
|
|
|
|
|
Add stub functions for PCI device models to do PCI DMA
This patch adds functions to pci.[ch] to perform PCI DMA operations.
At present, these are just stubs which perform directly cpu physical
memory accesses. Stubs are included which are analogous to
cpu_physical_memory_{read,write}(), the stX_phys() and ldX_phys()
functions and cpu_physical_memory_{map,unmap}().
In addition, a wrapper around qemu_sglist_init() is provided, which
also takes a PCIDevice *. It's assumed that _init() is the only
sglist function which will need wrapping, the idea being that once we
have IOMMU support whatever IOMMU context handle the wrapper derives
from the PCI device will be stored within the sglist structure for
later use.
Using these stubs, however, distinguishes PCI device DMA transactions from
other accesses to physical memory, which will allow PCI IOMMU support to
be added in one place, rather than updating every PCI driver at that time.
That is, it allows us to update individual PCI drivers to support an IOMMU
without having yet determined the details of how the IOMMU emulation will
operate. This will let us remove the most bitrot-sensitive part of an
IOMMU patch in advance.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2011-10-31 07:06:47 +01:00
|
|
|
/*
 * Perform a bus-master DMA transfer of @len bytes between @buf and the
 * device's DMA address space at @addr, in direction @dir.
 *
 * Returns 0 on success, or the non-zero error result of dma_memory_rw()
 * on failure.  (Previously the helper discarded dma_memory_rw()'s return
 * value and always returned 0, silently swallowing DMA errors; success
 * still returns 0, so callers checking for zero are unaffected.)
 */
static inline int pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
                             void *buf, dma_addr_t len, DMADirection dir)
{
    return dma_memory_rw(pci_get_address_space(dev), addr, buf, len, dir);
}
|
|
|
|
|
|
|
|
/* DMA-read @len bytes from the device's address space at @addr into @buf
 * (device-to-driver direction per DMA_DIRECTION_TO_DEVICE naming comes
 * from the bus's point of view: data flows toward the device model's
 * buffer).  Returns what pci_dma_rw() returns. */
static inline int pci_dma_read(PCIDevice *dev, dma_addr_t addr,
                               void *buf, dma_addr_t len)
{
    return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}
|
|
|
|
|
|
|
|
/* DMA-write @len bytes from @buf into the device's address space at @addr.
 * The cast drops const because pci_dma_rw() takes a void *; the buffer is
 * only read for DMA_DIRECTION_FROM_DEVICE.  Returns what pci_dma_rw()
 * returns. */
static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr,
                                const void *buf, dma_addr_t len)
{
    return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE);
}
|
|
|
|
|
|
|
|
/*
 * Generate a load/store pair of PCI DMA accessors of width @_bits:
 * ld<_l>_pci_dma() reads a uint<_bits>_t from the device's DMA address
 * space, st<_s>_pci_dma() writes one.  They forward to the generic
 * ld*_dma()/st*_dma() helpers using the device's bus-master address space.
 */
#define PCI_DMA_DEFINE_LDST(_l, _s, _bits)                              \
    static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev,      \
                                                   dma_addr_t addr)     \
    {                                                                   \
        return ld##_l##_dma(pci_get_address_space(dev), addr);          \
    }                                                                   \
    static inline void st##_s##_pci_dma(PCIDevice *dev,                 \
                                        dma_addr_t addr, uint##_bits##_t val) \
    {                                                                   \
        st##_s##_dma(pci_get_address_space(dev), addr, val);            \
    }
|
|
|
|
|
|
|
|
/*
 * Instantiate the accessors: unsigned byte, plus little- and big-endian
 * 16/32/64-bit variants.  The macro expands to complete function
 * definitions ending in '}', so no terminating semicolon is needed —
 * the previous trailing ';' on some lines was a stray empty file-scope
 * declaration (not valid ISO C; -pedantic warns) and the lines were
 * inconsistent with each other.
 */
PCI_DMA_DEFINE_LDST(ub, b, 8)
PCI_DMA_DEFINE_LDST(uw_le, w_le, 16)
PCI_DMA_DEFINE_LDST(l_le, l_le, 32)
PCI_DMA_DEFINE_LDST(q_le, q_le, 64)
PCI_DMA_DEFINE_LDST(uw_be, w_be, 16)
PCI_DMA_DEFINE_LDST(l_be, l_be, 32)
PCI_DMA_DEFINE_LDST(q_be, q_be, 64)

#undef PCI_DMA_DEFINE_LDST
|
|
|
|
|
|
|
|
/*
 * Map a region of the device's DMA address space starting at @addr into
 * host memory for direct access in direction @dir.  On entry *@plen is the
 * requested length; dma_memory_map() may shrink it to the mappable length.
 * Returns the host pointer (see dma_memory_map() for failure semantics).
 * Must be paired with pci_dma_unmap().
 */
static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
                                dma_addr_t *plen, DMADirection dir)
{
    return dma_memory_map(pci_get_address_space(dev), addr, plen, dir);
}
|
|
|
|
|
|
|
|
/* Release a mapping obtained from pci_dma_map().  @len and @dir must match
 * the original mapping; @access_len is how many bytes were actually
 * accessed, so only that much needs to be written back. */
static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
                                 DMADirection dir, dma_addr_t access_len)
{
    dma_memory_unmap(pci_get_address_space(dev), buffer, len, dir, access_len);
}
|
|
|
|
|
|
|
|
/* Initialize scatter/gather list @qsg for DMA by this PCI device: binds the
 * list to the device's bus-master address space so later sglist operations
 * go through any IOMMU translation.  @alloc_hint preallocates that many
 * entries. */
static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
                                       int alloc_hint)
{
    qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
}
|
|
|
|
|
2012-01-13 17:07:20 +01:00
|
|
|
/* Migration state description for a PCIDevice (defined in pci.c); used by
 * the VMSTATE_PCI_DEVICE* macros below. */
extern const VMStateDescription vmstate_pci_device;
|
|
|
|
|
|
|
|
/*
 * Embed the migration state of a PCIDevice stored by value: @_field is a
 * PCIDevice member of the containing state struct @_state.
 */
#define VMSTATE_PCI_DEVICE(_field, _state) {                         \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(PCIDevice),                                 \
    .vmsd       = &vmstate_pci_device,                               \
    .flags      = VMS_STRUCT,                                        \
    .offset     = vmstate_offset_value(_state, _field, PCIDevice),   \
}
|
|
|
|
|
|
|
|
/*
 * Like VMSTATE_PCI_DEVICE, but @_field is a PCIDevice * member of @_state
 * (the struct is reached through the pointer, hence VMS_POINTER).
 */
#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) {                 \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(PCIDevice),                                 \
    .vmsd       = &vmstate_pci_device,                               \
    .flags      = VMS_STRUCT|VMS_POINTER,                            \
    .offset     = vmstate_offset_pointer(_state, _field, PCIDevice), \
}
|
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
#endif
|