hw/nvme updates

- sriov functionality
- odd fixes

Merge tag 'nvme-next-pull-request' of git://git.infradead.org/qemu-nvme into staging

hw/nvme updates

- sriov functionality
- odd fixes

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEUigzqnXi3OaiR2bATeGvMW1PDekFAmK02wUACgkQTeGvMW1P
# DenNPwgAwhQCXXacTb+6vEdxN30QoWygzQj5BLm//SiXlj7hBX7P/JqCxYF5vUDU
# EaZkl4n3ry5T1xqlUWIBFdIAmKyrsWz2eKTrX41g64i/L+/nfJXZ+IgQc3WkM/FK
# 5NwwAE8q/JGiRczLesF/9QvQq/90L6QtyC48bsS8AIcl5IcqHCKGwEJS7LErltex
# YZDJyTNU4wB2XFophylJUL43GrHa/kUFA2ZHgs9iuH0p5LGG6UM3KoinBKcbwn47
# iEWKccvsHSyfE8VpJJS5STMEeGGaBPziZ654ElLmzVq6EXDKMCoX03naQ9Q8oSpl
# FiktbxllCYdmECb44PNBEd/nLdpCdQ==
# =o54a
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 23 Jun 2022 02:28:37 PM PDT
# gpg:                using RSA key 522833AA75E2DCE6A24766C04DE1AF316D4F0DE9
# gpg: Good signature from "Klaus Jensen <its@irrelevant.dk>" [unknown]
# gpg:                 aka "Klaus Jensen <k.jensen@samsung.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: DDCA 4D9C 9EF9 31CC 3468  4272 63D5 6FC5 E55D A838
#      Subkey fingerprint: 5228 33AA 75E2 DCE6 A247  66C0 4DE1 AF31 6D4F 0DE9

* tag 'nvme-next-pull-request' of git://git.infradead.org/qemu-nvme:
  hw/nvme: clear aen mask on reset
  Revert "hw/block/nvme: add support for sgl bit bucket descriptor"
  hw/nvme: clean up CC register write logic
  hw/acpi: Make the PCI hot-plug aware of SR-IOV
  hw/nvme: Update the initialization place for the AER queue
  docs: Add documentation for SR-IOV and Virtualization Enhancements
  hw/nvme: Add support for the Virtualization Management command
  hw/nvme: Initialize capability structures for primary/secondary controllers
  hw/nvme: Calculate BAR attributes in a function
  hw/nvme: Remove reg_size variable and update BAR0 size calculation
  hw/nvme: Make max_ioqpairs and msix_qsize configurable in runtime
  hw/nvme: Implement the Function Level Reset
  hw/nvme: Add support for Secondary Controller List
  hw/nvme: Add support for Primary Controller Capabilities
  hw/nvme: Add support for SR-IOV

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2022-06-23 14:52:30 -07:00
commit 3a821c52e1
9 changed files with 933 additions and 102 deletions

docs/system/devices/nvme.rst

@@ -239,3 +239,85 @@ The virtual namespace device supports DIF- and DIX-based protection information
to ``1`` to transfer protection information as the first eight bytes of
metadata. Otherwise, the protection information is transferred as the last
eight bytes.

Virtualization Enhancements and SR-IOV (Experimental Support)
-------------------------------------------------------------

The ``nvme`` device supports Single Root I/O Virtualization and Sharing
along with Virtualization Enhancements. The controller has to be linked to
an NVM Subsystem device (``nvme-subsys``) for use with SR-IOV.

A number of parameters are present (**please note that they may be
subject to change**):

``sriov_max_vfs`` (default: ``0``)
  Indicates the maximum number of PCIe virtual functions supported
  by the controller. Specifying a non-zero value enables reporting of both
  SR-IOV and ARI (Alternative Routing-ID Interpretation) capabilities
  by the NVMe device. Virtual function controllers will not report SR-IOV.

``sriov_vq_flexible``
  Indicates the total number of flexible queue resources assignable to all
  the secondary controllers. Implicitly sets the number of primary
  controller's private resources to ``(max_ioqpairs - sriov_vq_flexible)``.

``sriov_vi_flexible``
  Indicates the total number of flexible interrupt resources assignable to
  all the secondary controllers. Implicitly sets the number of primary
  controller's private resources to ``(msix_qsize - sriov_vi_flexible)``.

``sriov_max_vi_per_vf`` (default: ``0``)
  Indicates the maximum number of virtual interrupt resources assignable
  to a secondary controller. The default ``0`` resolves to
  ``(sriov_vi_flexible / sriov_max_vfs)``.

``sriov_max_vq_per_vf`` (default: ``0``)
  Indicates the maximum number of virtual queue resources assignable to
  a secondary controller. The default ``0`` resolves to
  ``(sriov_vq_flexible / sriov_max_vfs)``.

The simplest possible invocation enables the capability to set up one VF
controller and assign an admin queue, an IO queue, and an MSI-X interrupt.

.. code-block:: console

   -device nvme-subsys,id=subsys0
   -device nvme,serial=deadbeef,subsys=subsys0,sriov_max_vfs=1,
    sriov_vq_flexible=2,sriov_vi_flexible=1

The minimum steps required to configure a functional NVMe secondary
controller are:

* unbind flexible resources from the primary controller

  .. code-block:: console

     nvme virt-mgmt /dev/nvme0 -c 0 -r 1 -a 1 -n 0
     nvme virt-mgmt /dev/nvme0 -c 0 -r 0 -a 1 -n 0

* perform a Function Level Reset on the primary controller to actually
  release the resources

  .. code-block:: console

     echo 1 > /sys/bus/pci/devices/0000:01:00.0/reset

* enable VF

  .. code-block:: console

     echo 1 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs

* assign the flexible resources to the VF and set it ONLINE

  .. code-block:: console

     nvme virt-mgmt /dev/nvme0 -c 1 -r 1 -a 8 -n 1
     nvme virt-mgmt /dev/nvme0 -c 1 -r 0 -a 8 -n 2
     nvme virt-mgmt /dev/nvme0 -c 1 -r 0 -a 9 -n 0

* bind the NVMe driver to the VF

  .. code-block:: console

     echo 0000:01:00.1 > /sys/bus/pci/drivers/nvme/bind
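Taken together, a complete (hypothetical) invocation exercising the minimal SR-IOV configuration above might look like the following sketch; the machine type, memory size, and backing image are placeholders, and only the ``nvme-subsys``/``nvme`` options mirror the documented example:

.. code-block:: console

   # illustrative only -- paths, sizing, and machine type are assumptions
   qemu-system-x86_64 -M q35 -m 4G \
       -drive file=nvme.img,if=none,id=nvm0,format=raw \
       -device nvme-subsys,id=subsys0 \
       -device nvme,serial=deadbeef,subsys=subsys0,sriov_max_vfs=1,sriov_vq_flexible=2,sriov_vi_flexible=1 \
       -device nvme-ns,drive=nvm0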

hw/acpi/pcihp.c

@@ -192,8 +192,12 @@ static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev)
* ACPI doesn't allow hotplug of bridge devices. Don't allow
* hot-unplug of bridge devices unless they were added by hotplug
* (and so, not described by acpi).
*
* Don't allow hot-unplug of SR-IOV Virtual Functions, as they
* will be removed implicitly, when Physical Function is unplugged.
*/
return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable;
return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable ||
pci_is_vf(dev);
}
static void acpi_pcihp_eject_slot(AcpiPciHpState *s, unsigned bsel, unsigned slots)
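The new condition hinges on QEMU's pci_is_vf() helper. A simplified, standalone sketch of that check follows (a device counts as a VF iff its SR-IOV state records a parent physical function; the stub type and field layout are an approximation, so verify against include/hw/pci/pci.h):

.. code-block:: c

   #include <stdbool.h>
   #include <stddef.h>

   /* Stub mirroring just enough of QEMU's PCIDevice for illustration. */
   typedef struct PCIDevice PCIDevice;
   struct PCIDevice {
       struct { struct { PCIDevice *pf; } sriov_vf; } exp;
   };

   /* A device is a virtual function iff its SR-IOV state points back at
    * a physical function; see include/hw/pci/pci.h for the real helper. */
   static inline bool pci_is_vf(PCIDevice *d)
   {
       return d->exp.sriov_vf.pf != NULL;
   }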

hw/nvme/ctrl.c (file diff suppressed because it is too large)

hw/nvme/ns.c

@@ -597,7 +597,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) {
NvmeCtrl *ctrl = subsys->ctrls[i];
if (ctrl) {
if (ctrl && ctrl != SUBSYS_SLOT_RSVD) {
nvme_attach_ns(ctrl, ns);
}
}

hw/nvme/nvme.h

@@ -24,7 +24,7 @@
#include "block/nvme.h"
#define NVME_MAX_CONTROLLERS 32
#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES 256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
@@ -43,6 +43,7 @@ typedef struct NvmeBus {
#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
#define SUBSYS_SLOT_RSVD (void *)0xFFFF
typedef struct NvmeSubsystem {
DeviceState parent_obj;
@@ -68,6 +69,10 @@ static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
return NULL;
}
if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) {
return NULL;
}
return subsys->ctrls[cntlid];
}
@@ -335,6 +340,7 @@ static inline const char *nvme_adm_opc_str(uint8_t opc)
case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES";
case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ";
case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT";
case NVME_ADM_CMD_VIRT_MNGMT: return "NVME_ADM_CMD_VIRT_MNGMT";
case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM";
default: return "NVME_ADM_CMD_UNKNOWN";
}
@@ -406,6 +412,11 @@ typedef struct NvmeParams {
uint8_t zasl;
bool auto_transition_zones;
bool legacy_cmb;
uint8_t sriov_max_vfs;
uint16_t sriov_vq_flexible;
uint16_t sriov_vi_flexible;
uint8_t sriov_max_vq_per_vf;
uint8_t sriov_max_vi_per_vf;
} NvmeParams;
typedef struct NvmeCtrl {
@@ -423,7 +434,6 @@ typedef struct NvmeCtrl {
uint16_t max_prp_ents;
uint16_t cqe_size;
uint16_t sqe_size;
uint32_t reg_size;
uint32_t max_q_ents;
uint8_t outstanding_aers;
uint32_t irq_status;
@@ -433,6 +443,8 @@
uint64_t starttime_ms;
uint16_t temperature;
uint8_t smart_critical_warning;
uint32_t conf_msix_qsize;
uint32_t conf_ioqpairs;
struct {
MemoryRegion mem;
@@ -477,8 +489,20 @@
uint32_t async_config;
NvmeHostBehaviorSupport hbs;
} features;
NvmePriCtrlCap pri_ctrl_cap;
NvmeSecCtrlList sec_ctrl_list;
struct {
uint16_t vqrfap;
uint16_t virfap;
} next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */
} NvmeCtrl;
typedef enum NvmeResetType {
NVME_RESET_FUNCTION = 0,
NVME_RESET_CONTROLLER = 1,
} NvmeResetType;
static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
{
if (!nsid || nsid > NVME_MAX_NAMESPACES) {
@@ -511,6 +535,33 @@ static inline uint16_t nvme_cid(NvmeRequest *req)
return le16_to_cpu(req->cqe.cid);
}
static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n)
{
PCIDevice *pci_dev = &n->parent_obj;
NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev));
if (pci_is_vf(pci_dev)) {
return &pf->sec_ctrl_list.sec[pcie_sriov_vf_number(pci_dev)];
}
return NULL;
}
static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n,
uint16_t cntlid)
{
NvmeSecCtrlList *list = &n->sec_ctrl_list;
uint8_t i;
for (i = 0; i < list->numcntl; i++) {
if (le16_to_cpu(list->sec[i].scid) == cntlid) {
return &list->sec[i];
}
}
return NULL;
}
void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
NvmeTxDirection dir, NvmeRequest *req);

hw/nvme/subsys.c

@@ -11,20 +11,71 @@
#include "nvme.h"
int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
static int nvme_subsys_reserve_cntlids(NvmeCtrl *n, int start, int num)
{
NvmeSubsystem *subsys = n->subsys;
int cntlid, nsid;
NvmeSecCtrlList *list = &n->sec_ctrl_list;
NvmeSecCtrlEntry *sctrl;
int i, cnt = 0;
for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) {
if (!subsys->ctrls[cntlid]) {
break;
for (i = start; i < ARRAY_SIZE(subsys->ctrls) && cnt < num; i++) {
if (!subsys->ctrls[i]) {
sctrl = &list->sec[cnt];
sctrl->scid = cpu_to_le16(i);
subsys->ctrls[i] = SUBSYS_SLOT_RSVD;
cnt++;
}
}
if (cntlid == ARRAY_SIZE(subsys->ctrls)) {
error_setg(errp, "no more free controller id");
return -1;
return cnt;
}
static void nvme_subsys_unreserve_cntlids(NvmeCtrl *n)
{
NvmeSubsystem *subsys = n->subsys;
NvmeSecCtrlList *list = &n->sec_ctrl_list;
NvmeSecCtrlEntry *sctrl;
int i, cntlid;
for (i = 0; i < n->params.sriov_max_vfs; i++) {
sctrl = &list->sec[i];
cntlid = le16_to_cpu(sctrl->scid);
if (cntlid) {
assert(subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD);
subsys->ctrls[cntlid] = NULL;
sctrl->scid = 0;
}
}
}
int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
{
NvmeSubsystem *subsys = n->subsys;
NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
int cntlid, nsid, num_rsvd, num_vfs = n->params.sriov_max_vfs;
if (pci_is_vf(&n->parent_obj)) {
cntlid = le16_to_cpu(sctrl->scid);
} else {
for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) {
if (!subsys->ctrls[cntlid]) {
break;
}
}
if (cntlid == ARRAY_SIZE(subsys->ctrls)) {
error_setg(errp, "no more free controller id");
return -1;
}
num_rsvd = nvme_subsys_reserve_cntlids(n, cntlid + 1, num_vfs);
if (num_rsvd != num_vfs) {
nvme_subsys_unreserve_cntlids(n);
error_setg(errp,
"no more free controller ids for secondary controllers");
return -1;
}
}
if (!subsys->serial) {
@@ -48,7 +99,13 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n)
{
subsys->ctrls[n->cntlid] = NULL;
if (pci_is_vf(&n->parent_obj)) {
subsys->ctrls[n->cntlid] = SUBSYS_SLOT_RSVD;
} else {
subsys->ctrls[n->cntlid] = NULL;
nvme_subsys_unreserve_cntlids(n);
}
n->cntlid = -1;
}
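The reservation logic above encodes three slot states in subsys->ctrls[] with a sentinel pointer (free = NULL, reserved = SUBSYS_SLOT_RSVD, in use = a real controller pointer) rather than a separate bitmap. A minimal standalone sketch of the idea (illustrative names, not QEMU code):

.. code-block:: c

   #include <assert.h>
   #include <stddef.h>

   #define SLOT_FREE NULL                /* slot available */
   #define SLOT_RSVD ((void *)0xFFFF)    /* held back for a future VF */

   static void *slots[8];                /* stands in for subsys->ctrls[] */

   /* Reserve up to num free slots starting at start; returns the count
    * actually reserved (mirrors nvme_subsys_reserve_cntlids above). */
   static int reserve(int start, int num)
   {
       int i, cnt = 0;
       for (i = start; i < 8 && cnt < num; i++) {
           if (slots[i] == SLOT_FREE) {
               slots[i] = SLOT_RSVD;
               cnt++;
           }
       }
       return cnt;
   }

   int main(void)
   {
       slots[0] = &slots;                /* primary controller takes id 0 */
       assert(reserve(1, 2) == 2);       /* ids 1 and 2 are now reserved */
       assert(slots[1] == SLOT_RSVD && slots[3] == SLOT_FREE);
       return 0;
   }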

hw/nvme/trace-events

@@ -56,6 +56,8 @@ pci_nvme_identify_ctrl(void) "identify controller"
pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8""
pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32""
pci_nvme_identify_ctrl_list(uint8_t cns, uint16_t cntid) "cns 0x%"PRIx8" cntid %"PRIu16""
pci_nvme_identify_pri_ctrl_cap(uint16_t cntlid) "identify primary controller capabilities cntlid=%"PRIu16""
pci_nvme_identify_sec_ctrl_list(uint16_t cntlid, uint8_t numcntl) "identify secondary controller list cntlid=%"PRIu16" numcntl=%"PRIu8""
pci_nvme_identify_ns_csi(uint32_t ns, uint8_t csi) "nsid=%"PRIu32", csi=0x%"PRIx8""
pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32""
pci_nvme_identify_nslist_csi(uint16_t ns, uint8_t csi) "nsid=%"PRIu16", csi=0x%"PRIx8""
@@ -108,6 +110,8 @@ pci_nvme_zd_extension_set(uint32_t zone_idx) "set descriptor extension for zone_
pci_nvme_clear_ns_close(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Closed state"
pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Empty state"
pci_nvme_zoned_zrwa_implicit_flush(uint64_t zslba, uint32_t nlb) "zslba 0x%"PRIx64" nlb %"PRIu32""
pci_nvme_pci_reset(void) "PCI Function Level Reset"
pci_nvme_virt_mngmt(uint16_t cid, uint16_t act, uint16_t cntlid, const char* rt, uint16_t nr) "cid %"PRIu16", act=0x%"PRIx16", ctrlid=%"PRIu16" %s nr=%"PRIu16""
# error conditions
pci_nvme_err_mdts(size_t len) "len %zu"
@@ -177,7 +181,9 @@ pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the
pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
pci_nvme_err_startfail_zasl_too_small(uint32_t zasl, uint32_t pagesz) "nvme_start_ctrl failed because zone append size limit %"PRIu32" is too small, needs to be >= %"PRIu32""
pci_nvme_err_startfail(void) "setting controller enable bit failed"
pci_nvme_err_startfail_virt_state(uint16_t vq, uint16_t vi, const char *state) "nvme_start_ctrl failed due to ctrl state: vi=%u vq=%u %s"
pci_nvme_err_invalid_mgmt_action(uint8_t action) "action=0x%"PRIx8""
pci_nvme_err_ignored_mmio_vf_offline(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d"
# undefined behavior
pci_nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""

include/block/nvme.h

@@ -595,6 +595,7 @@ enum NvmeAdminCommands {
NVME_ADM_CMD_ACTIVATE_FW = 0x10,
NVME_ADM_CMD_DOWNLOAD_FW = 0x11,
NVME_ADM_CMD_NS_ATTACHMENT = 0x15,
NVME_ADM_CMD_VIRT_MNGMT = 0x1c,
NVME_ADM_CMD_FORMAT_NVM = 0x80,
NVME_ADM_CMD_SECURITY_SEND = 0x81,
NVME_ADM_CMD_SECURITY_RECV = 0x82,
@@ -899,6 +900,10 @@ enum NvmeStatusCodes {
NVME_NS_PRIVATE = 0x0119,
NVME_NS_NOT_ATTACHED = 0x011a,
NVME_NS_CTRL_LIST_INVALID = 0x011c,
NVME_INVALID_CTRL_ID = 0x011f,
NVME_INVALID_SEC_CTRL_STATE = 0x0120,
NVME_INVALID_NUM_RESOURCES = 0x0121,
NVME_INVALID_RESOURCE_ID = 0x0122,
NVME_CONFLICTING_ATTRS = 0x0180,
NVME_INVALID_PROT_INFO = 0x0181,
NVME_WRITE_TO_RO = 0x0182,
@@ -1033,6 +1038,8 @@ enum NvmeIdCns {
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
NVME_ID_CNS_PRIMARY_CTRL_CAP = 0x14,
NVME_ID_CNS_SECONDARY_CTRL_LIST = 0x15,
NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
NVME_ID_CNS_CS_NS_PRESENT = 0x1b,
NVME_ID_CNS_IO_COMMAND_SET = 0x1c,
@@ -1553,6 +1560,61 @@ typedef enum NvmeZoneState {
NVME_ZONE_STATE_OFFLINE = 0x0f,
} NvmeZoneState;
typedef struct QEMU_PACKED NvmePriCtrlCap {
uint16_t cntlid;
uint16_t portid;
uint8_t crt;
uint8_t rsvd5[27];
uint32_t vqfrt;
uint32_t vqrfa;
uint16_t vqrfap;
uint16_t vqprt;
uint16_t vqfrsm;
uint16_t vqgran;
uint8_t rsvd48[16];
uint32_t vifrt;
uint32_t virfa;
uint16_t virfap;
uint16_t viprt;
uint16_t vifrsm;
uint16_t vigran;
uint8_t rsvd80[4016];
} NvmePriCtrlCap;
typedef enum NvmePriCtrlCapCrt {
NVME_CRT_VQ = 1 << 0,
NVME_CRT_VI = 1 << 1,
} NvmePriCtrlCapCrt;
typedef struct QEMU_PACKED NvmeSecCtrlEntry {
uint16_t scid;
uint16_t pcid;
uint8_t scs;
uint8_t rsvd5[3];
uint16_t vfn;
uint16_t nvq;
uint16_t nvi;
uint8_t rsvd14[18];
} NvmeSecCtrlEntry;
typedef struct QEMU_PACKED NvmeSecCtrlList {
uint8_t numcntl;
uint8_t rsvd1[31];
NvmeSecCtrlEntry sec[127];
} NvmeSecCtrlList;
typedef enum NvmeVirtMngmtAction {
NVME_VIRT_MNGMT_ACTION_PRM_ALLOC = 0x01,
NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE = 0x07,
NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN = 0x08,
NVME_VIRT_MNGMT_ACTION_SEC_ONLINE = 0x09,
} NvmeVirtMngmtAction;
typedef enum NvmeVirtualResourceType {
NVME_VIRT_RES_QUEUE = 0x00,
NVME_VIRT_RES_INTERRUPT = 0x01,
} NvmeVirtualResourceType;
static inline void _nvme_check_size(void)
{
QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096);
@@ -1588,5 +1650,8 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsDescr) != 4);
QEMU_BUILD_BUG_ON(sizeof(NvmeZoneDescr) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDifTuple) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
}
#endif
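For reference, the new opcode and enums combine into an admin command roughly as follows. The dword layout (ACT in CDW10 bits 3:0, RT in bits 10:8, CNTLID in bits 31:16; NR in CDW11 bits 15:0) is taken from the NVMe specification's Virtualization Management definition rather than from this series, so treat the sketch as illustrative:

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   /* Values mirror the enums added above. */
   enum { VIRT_MNGMT_ACTION_SEC_ASSIGN = 0x08 };
   enum { VIRT_RES_QUEUE = 0x00, VIRT_RES_INTERRUPT = 0x01 };

   /* Pack CDW10: ACT in bits 3:0, RT in bits 10:8, CNTLID in bits 31:16
    * (per the NVMe spec); the resource count NR goes in CDW11 bits 15:0. */
   static uint32_t virt_mgmt_cdw10(uint16_t cntlid, uint8_t rt, uint8_t act)
   {
       return ((uint32_t)cntlid << 16) | ((uint32_t)(rt & 0x7) << 8) |
              (act & 0xf);
   }

   int main(void)
   {
       /* Equivalent of "nvme virt-mgmt /dev/nvme0 -c 1 -r 0 -a 8 -n 2":
        * assign two flexible queue resources to secondary controller 1. */
       uint32_t cdw10 = virt_mgmt_cdw10(1, VIRT_RES_QUEUE,
                                        VIRT_MNGMT_ACTION_SEC_ASSIGN);
       uint32_t cdw11 = 2;
       printf("cdw10=0x%08x cdw11=0x%08x\n", cdw10, cdw11);
       return 0;
   }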

include/hw/pci/pci_ids.h

@@ -238,6 +238,7 @@
#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
#define PCI_DEVICE_ID_INTEL_82801D 0x24CD
#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
#define PCI_DEVICE_ID_INTEL_NVME 0x5845
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020