Merge branches 'pci/aer', 'pci/hotplug', 'pci/misc', 'pci/msi', 'pci/resource' and 'pci/virtualization' into next

* pci/aer:
  PCI/AER: Clear error status registers during enumeration and restore

* pci/hotplug:
  PCI: pciehp: Queue power work requests in dedicated function

* pci/misc:
  PCI: Turn off Request Attributes to avoid Chelsio T5 Completion erratum
  x86/PCI: Make pci_subsys_init() static
  PCI: Add builtin_pci_driver() to avoid registration boilerplate
  PCI: Remove unnecessary "if" statement

* pci/msi:
  x86/PCI: Don't alloc pcibios-irq when MSI is enabled
  PCI/MSI: Export all remapped MSIs to sysfs attributes
  PCI: Disable MSI on SiS 761

* pci/resource:
  sparc/PCI: Add mem64 resource parsing for root bus
  PCI: Expand Enhanced Allocation BAR output
  PCI: Make Enhanced Allocation bitmasks more obvious
  PCI: Handle Enhanced Allocation capability for SR-IOV devices
  PCI: Add support for Enhanced Allocation devices
  PCI: Add Enhanced Allocation register entries
  PCI: Handle IORESOURCE_PCI_FIXED when assigning resources
  PCI: Handle IORESOURCE_PCI_FIXED when sizing resources
  PCI: Clear IORESOURCE_UNSET when reverting to firmware-assigned address

* pci/virtualization:
  PCI: Fix sriov_enable() error path for pcibios_enable_sriov() failures
  PCI: Wait 1 second between disabling VFs and clearing NumVFs
  PCI: Reorder pcibios_sriov_disable()
  PCI: Remove VFs in reverse order if virtfn_add() fails
  PCI: Remove redundant validation of SR-IOV offset/stride registers
  PCI: Set SR-IOV NumVFs to zero after enumeration
  PCI: Enable SR-IOV ARI Capable Hierarchy before reading TotalVFs
  PCI: Don't try to restore VF BARs
Bjorn Helgaas 2015-11-02 15:57:03 -06:00
19 changed files with 555 additions and 128 deletions

View File

@@ -185,8 +185,10 @@ static unsigned long pci_parse_of_flags(u32 addr0)
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & 0x01000000)
flags |= IORESOURCE_MEM_64
| PCI_BASE_ADDRESS_MEM_TYPE_64;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
| PCI_BASE_ADDRESS_MEM_PREFETCH;
@@ -655,6 +657,9 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
pbm->io_space.start);
pci_add_resource_offset(&resources, &pbm->mem_space,
pbm->mem_space.start);
if (pbm->mem64_space.flags)
pci_add_resource_offset(&resources, &pbm->mem64_space,
pbm->mem_space.start);
pbm->busn.start = pbm->pci_first_busno;
pbm->busn.end = pbm->pci_last_busno;
pbm->busn.flags = IORESOURCE_BUS;
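Not part of the diff: the phys.hi cell decoded by pci_parse_of_flags() above follows the IEEE 1275 PCI binding, where bits 25:24 select the address space (0b01 I/O, 0b10 32-bit MEM, 0b11 64-bit MEM) and bit 30 marks a prefetchable range. A minimal standalone sketch of the same decode, with a hypothetical cell value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr0 = 0x83000010;	/* hypothetical: non-relocatable 64-bit MEM, register 0x10 */

	if (addr0 & 0x02000000)			/* bit 25 set: memory space */
		printf("MEM, %s%s\n",
		       (addr0 & 0x01000000) ? "64-bit" : "32-bit",
		       (addr0 & 0x40000000) ? ", prefetchable" : "");
	else if (addr0 & 0x01000000)		/* space code 0b01: I/O */
		printf("IO\n");
	return 0;
}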

View File

@@ -406,6 +406,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
}
num_pbm_ranges = i / sizeof(*pbm_ranges);
memset(&pbm->mem64_space, 0, sizeof(struct resource));
for (i = 0; i < num_pbm_ranges; i++) {
const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
@@ -451,7 +452,12 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
break;
case 3:
/* XXX 64-bit MEM handling XXX */
/* 64-bit MEM handling */
pbm->mem64_space.start = a;
pbm->mem64_space.end = a + size - 1UL;
pbm->mem64_space.flags = IORESOURCE_MEM;
saw_mem = 1;
break;
default:
break;
@@ -465,15 +471,22 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
prom_halt();
}
printk("%s: PCI IO[%llx] MEM[%llx]\n",
printk("%s: PCI IO[%llx] MEM[%llx]",
pbm->name,
pbm->io_space.start,
pbm->mem_space.start);
if (pbm->mem64_space.flags)
printk(" MEM64[%llx]",
pbm->mem64_space.start);
printk("\n");
pbm->io_space.name = pbm->mem_space.name = pbm->name;
pbm->mem64_space.name = pbm->name;
request_resource(&ioport_resource, &pbm->io_space);
request_resource(&iomem_resource, &pbm->mem_space);
if (pbm->mem64_space.flags)
request_resource(&iomem_resource, &pbm->mem64_space);
pci_register_legacy_regions(&pbm->io_space,
&pbm->mem_space);

View File

@@ -97,6 +97,7 @@ struct pci_pbm_info {
/* PBM I/O and Memory space resources. */
struct resource io_space;
struct resource mem_space;
struct resource mem64_space;
struct resource busn;
/* Base of PCI Config space, can be per-PBM or shared. */

View File

@@ -674,6 +674,14 @@ int pcibios_add_device(struct pci_dev *dev)
int pcibios_alloc_irq(struct pci_dev *dev)
{
/*
* If the PCI device was already claimed by core code and has
* MSI enabled, probing of the pcibios IRQ will overwrite
* dev->irq. So bail out if MSI is already enabled.
*/
if (pci_dev_msi_enabled(dev))
return -EBUSY;
return pcibios_enable_irq(dev);
}

View File

@@ -54,7 +54,7 @@ void pcibios_scan_specific_bus(int busn)
}
EXPORT_SYMBOL_GPL(pcibios_scan_specific_bus);
int __init pci_subsys_init(void)
static int __init pci_subsys_init(void)
{
/*
* The init function returns a non-zero value when
* pci_legacy_init should be invoked.

View File

@@ -204,36 +204,39 @@ static void pciehp_power_thread(struct work_struct *work)
kfree(info);
}
void pciehp_queue_pushbutton_work(struct work_struct *work)
static void pciehp_queue_power_work(struct slot *p_slot, int req)
{
struct slot *p_slot = container_of(work, struct slot, work.work);
struct power_work_info *info;
p_slot->state = (req == ENABLE_REQ) ? POWERON_STATE : POWEROFF_STATE;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
__func__);
ctrl_err(p_slot->ctrl, "no memory to queue %s request\n",
(req == ENABLE_REQ) ? "poweron" : "poweroff");
return;
}
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
info->req = req;
queue_work(p_slot->wq, &info->work);
}
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
struct slot *p_slot = container_of(work, struct slot, work.work);
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
p_slot->state = POWEROFF_STATE;
info->req = DISABLE_REQ;
pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case BLINKINGON_STATE:
p_slot->state = POWERON_STATE;
info->req = ENABLE_REQ;
pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
default:
kfree(info);
goto out;
break;
}
queue_work(p_slot->wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -301,27 +304,12 @@ static void handle_button_press_event(struct slot *p_slot)
static void handle_surprise_event(struct slot *p_slot)
{
u8 getstatus;
struct power_work_info *info;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
__func__);
return;
}
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
if (!getstatus) {
p_slot->state = POWEROFF_STATE;
info->req = DISABLE_REQ;
} else {
p_slot->state = POWERON_STATE;
info->req = ENABLE_REQ;
}
queue_work(p_slot->wq, &info->work);
if (!getstatus)
pciehp_queue_power_work(p_slot, DISABLE_REQ);
else
pciehp_queue_power_work(p_slot, ENABLE_REQ);
}
/*
@@ -330,17 +318,6 @@ static void handle_surprise_event(struct slot *p_slot)
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
struct power_work_info *info;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
__func__);
return;
}
info->p_slot = p_slot;
info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
INIT_WORK(&info->work, pciehp_power_thread);
switch (p_slot->state) {
case BLINKINGON_STATE:
@@ -348,22 +325,19 @@ static void handle_link_event(struct slot *p_slot, u32 event)
cancel_delayed_work(&p_slot->work);
/* Fall through */
case STATIC_STATE:
p_slot->state = event == INT_LINK_UP ?
POWERON_STATE : POWEROFF_STATE;
queue_work(p_slot->wq, &info->work);
pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
ENABLE_REQ : DISABLE_REQ);
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
ctrl_info(ctrl,
"Link Up event ignored on slot(%s): already powering on\n",
slot_name(p_slot));
kfree(info);
} else {
ctrl_info(ctrl,
"Link Down event queued on slot(%s): currently getting powered on\n",
slot_name(p_slot));
p_slot->state = POWEROFF_STATE;
queue_work(p_slot->wq, &info->work);
pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
@@ -371,19 +345,16 @@ static void handle_link_event(struct slot *p_slot, u32 event)
ctrl_info(ctrl,
"Link Up event queued on slot(%s): currently getting powered off\n",
slot_name(p_slot));
p_slot->state = POWERON_STATE;
queue_work(p_slot->wq, &info->work);
pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
ctrl_info(ctrl,
"Link Down event ignored on slot(%s): already powering off\n",
slot_name(p_slot));
kfree(info);
}
break;
default:
ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
p_slot->state, slot_name(p_slot));
kfree(info);
break;
}
}

View File

@@ -54,24 +54,29 @@ static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
* The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
* determine how many additional bus numbers will be consumed by VFs.
*
* Iterate over all valid NumVFs and calculate the maximum number of bus
* numbers that could ever be required.
* Iterate over all valid NumVFs, validate offset and stride, and calculate
* the maximum number of bus numbers that could ever be required.
*/
static inline u8 virtfn_max_buses(struct pci_dev *dev)
static int compute_max_vf_buses(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
int nr_virtfn;
u8 max = 0;
int busnr;
int nr_virtfn, busnr, rc = 0;
for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
pci_iov_set_numvfs(dev, nr_virtfn);
if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
rc = -EIO;
goto out;
}
busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (busnr > max)
max = busnr;
if (busnr > iov->max_VF_buses)
iov->max_VF_buses = busnr;
}
return max;
out:
pci_iov_set_numvfs(dev, 0);
return rc;
}
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
@@ -222,21 +227,25 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
return 0;
return 0;
}
int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
return 0;
}
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
int i, j;
int i;
int nres;
u16 offset, stride, initial;
u16 initial;
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;
int bus;
int retval;
if (!nr_virtfn)
return 0;
@@ -253,11 +262,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (nr_virtfn > 1 && !stride))
return -EIO;
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
@@ -270,9 +274,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
iov->offset = offset;
iov->stride = stride;
bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (bus > dev->bus->busn_res.end) {
dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
@@ -313,10 +314,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (nr_virtfn < initial)
initial = nr_virtfn;
if ((retval = pcibios_sriov_enable(dev, initial))) {
dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n",
retval);
return retval;
rc = pcibios_sriov_enable(dev, initial);
if (rc) {
dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", rc);
goto err_pcibios;
}
for (i = 0; i < initial; i++) {
@@ -331,27 +332,24 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return 0;
failed:
for (j = 0; j < i; j++)
virtfn_remove(dev, j, 0);
while (i--)
virtfn_remove(dev, i, 0);
pcibios_sriov_disable(dev);
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
pci_iov_set_numvfs(dev, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
pci_iov_set_numvfs(dev, 0);
return rc;
}
int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
return 0;
}
static void sriov_disable(struct pci_dev *dev)
{
int i;
@@ -384,7 +382,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
int rc;
int nres;
u32 pgsz;
u16 ctrl, total, offset, stride;
u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
struct pci_dev *pdev;
@@ -399,10 +397,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
ssleep(1);
}
pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
if (!total)
return 0;
ctrl = 0;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev->is_physfn)
@@ -414,11 +408,10 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (total > 1 && !stride))
return -EIO;
pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
if (!total)
return 0;
pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
@@ -436,8 +429,15 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
pos + PCI_SRIOV_BAR + i * 4);
/*
* If it is already FIXED, don't change it, something
* (perhaps EA or header fixups) wants it this way.
*/
if (res->flags & IORESOURCE_PCI_FIXED)
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
pos + PCI_SRIOV_BAR + i * 4);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
@@ -456,8 +456,6 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
iov->offset = offset;
iov->stride = stride;
iov->pgsz = pgsz;
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
@@ -474,10 +472,15 @@ found:
dev->sriov = iov;
dev->is_physfn = 1;
iov->max_VF_buses = virtfn_max_buses(dev);
rc = compute_max_vf_buses(dev);
if (rc)
goto fail_max_buses;
return 0;
fail_max_buses:
dev->sriov = NULL;
dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
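Not part of the diff: the bus arithmetic that compute_max_vf_buses() iterates over comes from the SR-IOV spec, where VF n (1-based) has Routing ID = PF Routing ID + First VF Offset + (n - 1) * VF Stride, and bits 15:8 of the Routing ID are the bus number; this is the same computation the kernel's pci_iov_virtfn_bus() performs. A standalone sketch with hypothetical offset/stride values:

#include <stdio.h>

/* Bus number used by 1-based VF n of a PF at (pf_bus, pf_devfn). */
static int vf_bus(int pf_bus, int pf_devfn, int offset, int stride, int n)
{
	return pf_bus + ((pf_devfn + offset + stride * (n - 1)) >> 8);
}

int main(void)
{
	int n, max = 0;

	/* Hypothetical PF 01:00.0 with First VF Offset 4, VF Stride 8, 64 VFs */
	for (n = 64; n; n--) {
		int bus = vf_bus(0x01, 0x00, 4, 8, n);
		if (bus > max)
			max = bus;
	}
	printf("highest VF bus: %02x\n", max);	/* VFs 33..64 land on bus 02 */
	return 0;
}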

View File

@@ -475,10 +475,11 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
int ret = -ENOMEM;
int num_msi = 0;
int count = 0;
int i;
/* Determine how many msi entries we have */
for_each_pci_msi_entry(entry, pdev)
++num_msi;
num_msi += entry->nvec_used;
if (!num_msi)
return 0;
@@ -487,19 +488,21 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
if (!msi_attrs)
return -ENOMEM;
for_each_pci_msi_entry(entry, pdev) {
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
if (!msi_dev_attr)
goto error_attrs;
msi_attrs[count] = &msi_dev_attr->attr;
for (i = 0; i < entry->nvec_used; i++) {
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
if (!msi_dev_attr)
goto error_attrs;
msi_attrs[count] = &msi_dev_attr->attr;
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
entry->irq);
if (!msi_dev_attr->attr.name)
goto error_attrs;
msi_dev_attr->attr.mode = S_IRUGO;
msi_dev_attr->show = msi_mode_show;
++count;
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
entry->irq + i);
if (!msi_dev_attr->attr.name)
goto error_attrs;
msi_dev_attr->attr.mode = S_IRUGO;
msi_dev_attr->show = msi_mode_show;
++count;
}
}
msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
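Not part of the diff: the new inner loop relies on the vectors of a multi-message MSI entry being assigned consecutive Linux IRQ numbers, so vector i of an entry maps to entry->irq + i. A toy sketch of the attribute names the loop produces for a hypothetical 4-vector entry:

#include <stdio.h>

int main(void)
{
	int base_irq = 72;	/* hypothetical entry->irq */
	int nvec_used = 4;	/* hypothetical number of vectors in the entry */
	int i;

	/* One sysfs attribute per vector, named after its IRQ number */
	for (i = 0; i < nvec_used; i++)
		printf("msi_irqs/%d\n", base_irq + i);
	return 0;
}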

View File

@@ -172,7 +172,7 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
__u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields = 0;
int retval = -ENODEV;
size_t retval = -ENODEV;
fields = sscanf(buf, "%x %x %x %x %x %x",
&vendor, &device, &subvendor, &subdevice,
@@ -190,15 +190,13 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
!((id->class ^ class) & class_mask)) {
list_del(&dynid->node);
kfree(dynid);
retval = 0;
retval = count;
break;
}
}
spin_unlock(&pdrv->dynids.lock);
if (retval)
return retval;
return count;
return retval;
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);

View File

@@ -27,6 +27,7 @@
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include <linux/aer.h>
#include "pci.h"
const char *pci_power_names[] = {
@@ -457,6 +458,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
}
EXPORT_SYMBOL(pci_find_parent_resource);
/**
* pci_find_pcie_root_port - return PCIe Root Port
* @dev: PCI device to query
*
* Traverse up the parent chain and return the PCIe Root Port PCI Device
* for a given PCI Device.
*/
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
struct pci_dev *bridge, *highest_pcie_bridge = NULL;
bridge = pci_upstream_bridge(dev);
while (bridge && pci_is_pcie(bridge)) {
highest_pcie_bridge = bridge;
bridge = pci_upstream_bridge(bridge);
}
if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
return NULL;
return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
@@ -484,7 +509,7 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
}
/**
* pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
* Restore the BAR values for a given device, so as to make it
@@ -494,6 +519,10 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
if (dev->is_virtfn)
return;
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -1099,6 +1128,8 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
pci_restore_config_space(dev);
pci_restore_pcix_state(dev);
@@ -2148,6 +2179,198 @@ void pci_pm_init(struct pci_dev *dev)
}
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
unsigned long flags = IORESOURCE_PCI_FIXED;
switch (prop) {
case PCI_EA_P_MEM:
case PCI_EA_P_VF_MEM:
flags |= IORESOURCE_MEM;
break;
case PCI_EA_P_MEM_PREFETCH:
case PCI_EA_P_VF_MEM_PREFETCH:
flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
break;
case PCI_EA_P_IO:
flags |= IORESOURCE_IO;
break;
default:
return 0;
}
return flags;
}
static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
u8 prop)
{
if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
(prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
return &dev->resource[PCI_IOV_RESOURCES +
bei - PCI_EA_BEI_VF_BAR0];
#endif
else if (bei == PCI_EA_BEI_ROM)
return &dev->resource[PCI_ROM_RESOURCE];
else
return NULL;
}
/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
struct resource *res;
int ent_size, ent_offset = offset;
resource_size_t start, end;
unsigned long flags;
u32 dw0, bei, base, max_offset;
u8 prop;
bool support_64 = (sizeof(resource_size_t) >= 8);
pci_read_config_dword(dev, ent_offset, &dw0);
ent_offset += 4;
/* Entry size field indicates DWORDs after 1st */
ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
goto out;
bei = (dw0 & PCI_EA_BEI) >> 4;
prop = (dw0 & PCI_EA_PP) >> 8;
/*
* If the Property is in the reserved range, try the Secondary
* Property instead.
*/
if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
prop = (dw0 & PCI_EA_SP) >> 16;
if (prop > PCI_EA_P_BRIDGE_IO)
goto out;
res = pci_ea_get_resource(dev, bei, prop);
if (!res) {
dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
goto out;
}
flags = pci_ea_flags(dev, prop);
if (!flags) {
dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
goto out;
}
/* Read Base */
pci_read_config_dword(dev, ent_offset, &base);
start = (base & PCI_EA_FIELD_MASK);
ent_offset += 4;
/* Read MaxOffset */
pci_read_config_dword(dev, ent_offset, &max_offset);
ent_offset += 4;
/* Read Base MSBs (if 64-bit entry) */
if (base & PCI_EA_IS_64) {
u32 base_upper;
pci_read_config_dword(dev, ent_offset, &base_upper);
ent_offset += 4;
flags |= IORESOURCE_MEM_64;
/* entry starts above 32-bit boundary, can't use */
if (!support_64 && base_upper)
goto out;
if (support_64)
start |= ((u64)base_upper << 32);
}
end = start + (max_offset | 0x03);
/* Read MaxOffset MSBs (if 64-bit entry) */
if (max_offset & PCI_EA_IS_64) {
u32 max_offset_upper;
pci_read_config_dword(dev, ent_offset, &max_offset_upper);
ent_offset += 4;
flags |= IORESOURCE_MEM_64;
/* entry too big, can't use */
if (!support_64 && max_offset_upper)
goto out;
if (support_64)
end += ((u64)max_offset_upper << 32);
}
if (end < start) {
dev_err(&dev->dev, "EA Entry crosses address boundary\n");
goto out;
}
if (ent_size != ent_offset - offset) {
dev_err(&dev->dev,
"EA Entry Size (%d) does not match length read (%d)\n",
ent_size, ent_offset - offset);
goto out;
}
res->name = pci_name(dev);
res->start = start;
res->end = end;
res->flags = flags;
if (bei <= PCI_EA_BEI_BAR5)
dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
bei, res, prop);
else if (bei == PCI_EA_BEI_ROM)
dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
res, prop);
else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
bei - PCI_EA_BEI_VF_BAR0, res, prop);
else
dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
bei, res, prop);
out:
return offset + ent_size;
}
/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
int ea;
u8 num_ent;
int offset;
int i;
/* find PCI EA capability in list */
ea = pci_find_capability(dev, PCI_CAP_ID_EA);
if (!ea)
return;
/* determine the number of entries */
pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
&num_ent);
num_ent &= PCI_EA_NUM_ENT_MASK;
offset = ea + PCI_EA_FIRST_ENT;
/* Skip DWORD 2 for type 1 functions */
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
offset += 4;
/* parse each EA entry */
for (i = 0; i < num_ent; ++i)
offset = pci_ea_read(dev, offset);
}
static void pci_add_saved_cap(struct pci_dev *pci_dev,
struct pci_cap_saved_state *new_cap)
{
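Not part of the diff: one subtlety in pci_ea_read() above is that the low two bits of the Base and MaxOffset registers carry flags (PCI_EA_IS_64 is bit 1), so Base is masked with PCI_EA_FIELD_MASK while the inclusive end ORs 0x3 back in, which also subsumes masking MaxOffset. A standalone arithmetic sketch with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

#define PCI_EA_FIELD_MASK 0xfffffffc	/* low two bits of Base/MaxOffset are flags */

int main(void)
{
	uint32_t base_reg = 0xe0000000;		/* hypothetical EA Base register */
	uint32_t max_off_reg = 0x000ffffc;	/* hypothetical EA MaxOffset register */

	uint64_t start = base_reg & PCI_EA_FIELD_MASK;
	uint64_t end = start + (max_off_reg | 0x3);	/* same expression as pci_ea_read() */

	printf("EA range [%#llx-%#llx], %llu bytes\n",	/* 1 MiB */
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)(end - start + 1));
	return 0;
}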

View File

@@ -78,6 +78,7 @@ bool pci_dev_keep_suspended(struct pci_dev *dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);

View File

@@ -74,6 +74,34 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
int pos;
u32 status;
int port_type;
if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return -EIO;
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
}
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
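Not part of the diff: the AER status registers are RW1C (write-1-to-clear), which is why pci_cleanup_aer_error_status_regs() above writes back exactly the value it just read. A generic sketch of the pattern:

#include <stdint.h>

/* RW1C idiom: write back the set bits to clear them; written 0s are ignored. */
static void clear_w1c_status(volatile uint32_t *reg)
{
	uint32_t status = *reg;	/* snapshot of currently-set status bits */

	*reg = status;		/* clears every bit that was read as 1 */
}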

View File

@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"
@@ -1598,6 +1599,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
static void pci_init_capabilities(struct pci_dev *dev)
{
/* Enhanced Allocation */
pci_ea_init(dev);
/* MSI/MSI-X list */
pci_msi_init_pci_dev(dev);
@@ -1621,6 +1625,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
pci_cleanup_aer_error_status_regs(dev);
}
static void pci_set_msi_domain(struct pci_dev *dev)

View File

@@ -2230,6 +2230,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
@@ -3691,6 +3692,63 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
/*
* Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
* values for the Attribute as were supplied in the header of the
* corresponding Request, except as explicitly allowed when IDO is used."
*
* If a non-compliant device generates a completion with a different
* attribute than the request, the receiver may accept it (which itself
* seems non-compliant based on sec 2.3.2), or it may handle it as a
* Malformed TLP or an Unexpected Completion, which will probably lead to a
* device access timeout.
*
* If the non-compliant device generates completions with zero attributes
* (instead of copying the attributes from the request), we can work around
* this by disabling the "Relaxed Ordering" and "No Snoop" attributes in
* upstream devices so they always generate requests with zero attributes.
*
* This affects other devices under the same Root Port, but since these
* attributes are performance hints, there should be no functional problem.
*
* Note that Configuration Space accesses are never supposed to have TLP
* Attributes, so we're safe waiting till after any Configuration Space
* accesses to do the Root Port fixup.
*/
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
if (!root_port) {
dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n");
return;
}
dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
dev_name(&pdev->dev));
pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_RELAX_EN |
PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
/*
* The Chelsio T5 chip fails to copy TLP Attributes from a Request to the
* Completion it generates.
*/
static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
{
/*
* This mask/compare operation selects for Physical Function 4 on a
* T5. We only need to fix up the Root Port once for any of the
* PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
0x54xx so we use that one.
*/
if ((pdev->device & 0xff00) == 0x5400)
quirk_disable_root_port_attributes(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
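Not part of the diff: a hedged way to observe the effect of the Chelsio T5 Root Port quirk above is to read Device Control on the Root Port and confirm both attribute enables are clear (kernel-context sketch; assumes a valid root_port pointer):

#include <linux/pci.h>

/* Sketch: verify the Root Port can no longer set RO/NS on its requests. */
static void check_attrs_disabled(struct pci_dev *root_port)
{
	u16 devctl;

	pcie_capability_read_word(root_port, PCI_EXP_DEVCTL, &devctl);
	if (devctl & (PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN))
		dev_warn(&root_port->dev, "attributes still enabled\n");
}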

View File

@@ -1037,9 +1037,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *r = &dev->resource[i];
resource_size_t r_size;
if (r->parent || ((r->flags & mask) != type &&
(r->flags & mask) != type2 &&
(r->flags & mask) != type3))
if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
((r->flags & mask) != type &&
(r->flags & mask) != type2 &&
(r->flags & mask) != type3))
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
@@ -1340,6 +1341,47 @@ void pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
{
int i;
struct resource *parent_r;
unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
pci_bus_for_each_resource(b, parent_r, i) {
if (!parent_r)
continue;
if ((r->flags & mask) == (parent_r->flags & mask) &&
resource_contains(parent_r, r))
request_resource(parent_r, r);
}
}
/*
* Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they
* are skipped by pbus_assign_resources_sorted().
*/
static void pdev_assign_fixed_resources(struct pci_dev *dev)
{
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct pci_bus *b;
struct resource *r = &dev->resource[i];
if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
b = dev->bus;
while (b && !r->parent) {
assign_fixed_resource_on_bus(b, r);
b = b->parent;
}
}
}
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -1350,6 +1392,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
pdev_assign_fixed_resources(dev);
b = dev->subordinate;
if (!b)
continue;

View File

@@ -36,6 +36,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
enum pci_bar_type type;
struct resource *res = dev->resource + resno;
if (dev->is_virtfn) {
dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
return;
}
/*
* Ignore resources for unimplemented BARs and unused resource slots
* for 64 bit BARs.
@@ -177,6 +182,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
end = res->end;
res->start = fw_addr;
res->end = res->start + size - 1;
res->flags &= ~IORESOURCE_UNSET;
root = pci_find_parent_resource(dev, res);
if (!root) {
@@ -194,6 +200,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
resno, res, conflict->name, conflict);
res->start = start;
res->end = end;
res->flags |= IORESOURCE_UNSET;
return -EBUSY;
}
return 0;

View File

@@ -42,6 +42,7 @@ struct aer_capability_regs {
int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
#else
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -55,6 +56,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
return -EINVAL;
}
#endif
void cper_print_aer(struct pci_dev *dev, int cper_severity,

View File

@@ -820,6 +820,7 @@ void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
@@ -1192,6 +1193,17 @@ void pci_unregister_driver(struct pci_driver *dev);
module_driver(__pci_driver, pci_register_driver, \
pci_unregister_driver)
/**
* builtin_pci_driver() - Helper macro for registering a PCI driver
* @__pci_driver: pci_driver struct
*
* Helper macro for PCI drivers which do not do anything special in their
* init code. This eliminates a lot of boilerplate. Each driver may only
* use this macro once, and calling it replaces device_initcall(...)
*/
#define builtin_pci_driver(__pci_driver) \
builtin_driver(__pci_driver, pci_register_driver)
struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
int pci_add_dynid(struct pci_driver *drv,
unsigned int vendor, unsigned int device,
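Not part of the diff: a usage sketch for the new builtin_pci_driver() macro above, with a hypothetical always-built-in driver. It mirrors module_pci_driver() but expands to a plain initcall, so there is no unregister path:

#include <linux/pci.h>

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
};
builtin_pci_driver(foo_driver);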

View File

@@ -216,7 +216,8 @@
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_ID_MAX PCI_CAP_ID_AF
#define PCI_CAP_ID_EA 0x14 /* PCI Enhanced Allocation */
#define PCI_CAP_ID_MAX PCI_CAP_ID_EA
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
@@ -353,6 +354,46 @@
#define PCI_AF_STATUS_TP 0x01
#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
/* PCI Enhanced Allocation registers */
#define PCI_EA_NUM_ENT 2 /* Number of Capability Entries */
#define PCI_EA_NUM_ENT_MASK 0x3f /* Num Entries Mask */
#define PCI_EA_FIRST_ENT 4 /* First EA Entry in List */
#define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */
#define PCI_EA_ES 0x00000007 /* Entry Size */
#define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */
/* 0-5 map to BARs 0-5 respectively */
#define PCI_EA_BEI_BAR0 0
#define PCI_EA_BEI_BAR5 5
#define PCI_EA_BEI_BRIDGE 6 /* Resource behind bridge */
#define PCI_EA_BEI_ENI 7 /* Equivalent Not Indicated */
#define PCI_EA_BEI_ROM 8 /* Expansion ROM */
/* 9-14 map to VF BARs 0-5 respectively */
#define PCI_EA_BEI_VF_BAR0 9
#define PCI_EA_BEI_VF_BAR5 14
#define PCI_EA_BEI_RESERVED 15 /* Reserved - Treat like ENI */
#define PCI_EA_PP 0x0000ff00 /* Primary Properties */
#define PCI_EA_SP 0x00ff0000 /* Secondary Properties */
#define PCI_EA_P_MEM 0x00 /* Non-Prefetch Memory */
#define PCI_EA_P_MEM_PREFETCH 0x01 /* Prefetchable Memory */
#define PCI_EA_P_IO 0x02 /* I/O Space */
#define PCI_EA_P_VF_MEM_PREFETCH 0x03 /* VF Prefetchable Memory */
#define PCI_EA_P_VF_MEM 0x04 /* VF Non-Prefetch Memory */
#define PCI_EA_P_BRIDGE_MEM 0x05 /* Bridge Non-Prefetch Memory */
#define PCI_EA_P_BRIDGE_MEM_PREFETCH 0x06 /* Bridge Prefetchable Memory */
#define PCI_EA_P_BRIDGE_IO 0x07 /* Bridge I/O Space */
/* 0x08-0xfc reserved */
#define PCI_EA_P_MEM_RESERVED 0xfd /* Reserved Memory */
#define PCI_EA_P_IO_RESERVED 0xfe /* Reserved I/O Space */
#define PCI_EA_P_UNAVAILABLE 0xff /* Entry Unavailable */
#define PCI_EA_WRITABLE 0x40000000 /* Writable: 1 = RW, 0 = HwInit */
#define PCI_EA_ENABLE 0x80000000 /* Enable for this entry */
#define PCI_EA_BASE 4 /* Base Address Offset */
#define PCI_EA_MAX_OFFSET 8 /* MaxOffset (resource length) */
/* bit 0 is reserved */
#define PCI_EA_IS_64 0x00000002 /* 64-bit field flag */
#define PCI_EA_FIELD_MASK 0xfffffffc /* For Base & Max Offset */
/* PCI-X registers (Type 0 (non-bridge) devices) */
#define PCI_X_CMD 2 /* Modes & Features */
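Not part of the diff: a standalone sketch decoding the first DWORD of an EA entry with the masks defined above (hypothetical register value):

#include <stdint.h>
#include <stdio.h>

#define PCI_EA_ES	0x00000007	/* Entry Size: DWORDs after the first */
#define PCI_EA_BEI	0x000000f0	/* BAR Equivalent Indicator */
#define PCI_EA_PP	0x0000ff00	/* Primary Properties */
#define PCI_EA_ENABLE	0x80000000	/* Entry enabled */

int main(void)
{
	uint32_t dw0 = 0x80000012;	/* hypothetical: enabled, BEI 1, ES 2 (3 DWORDs total) */

	printf("enabled=%u bei=%u prop=%#x entry=%u dwords\n",
	       !!(dw0 & PCI_EA_ENABLE),
	       (dw0 & PCI_EA_BEI) >> 4,
	       (dw0 & PCI_EA_PP) >> 8,
	       (dw0 & PCI_EA_ES) + 1);
	return 0;
}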