Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6:
  PCI: Limit VPD length for Broadcom 5708S
  PCI PM: Export pci_pme_active to drivers
  PCI: remove duplicate symbol from pci_ids.h
  PCI: check the return value of device_create_bin_file() in pci_create_bus()
  PCI: fully restore MSI state at resume time
  DMA: make dma-coherent.c documentation kdoc-friendly
  PCI: make pci_register_driver() a macro
  PCI: add Broadcom 5708S to VPD length quirk
commit a7ef6a40f7
Author: Linus Torvalds
Date:   2008-08-11 10:38:36 -07:00

7 changed files with 59 additions and 31 deletions

drivers/pci/msi.c

@@ -308,9 +308,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
                                   entry->msi_attrib.masked);
 
         pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
-        control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
-        if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
-                control |= PCI_MSI_FLAGS_ENABLE;
+        control &= ~PCI_MSI_FLAGS_QSIZE;
+        control |= PCI_MSI_FLAGS_ENABLE;
         pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
 }
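This hunk is reached on the resume path: pci_restore_state() calls into the MSI code, and with this change the MSI enable bit is written back unconditionally instead of depending on the saved per-vector mask state. A hedged sketch of a driver resume hook that relies on that behaviour (mydev_resume and its exact ordering are illustrative, not taken from this merge):

#include <linux/pci.h>

/* Illustrative resume hook; names are placeholders. */
static int mydev_resume(struct pci_dev *pdev)
{
        int err;

        err = pci_set_power_state(pdev, PCI_D0);
        if (err)
                return err;

        /* Restores saved config space, including the MSI control register. */
        pci_restore_state(pdev);

        return pci_enable_device(pdev);
}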

drivers/pci/pci.c

@@ -1060,7 +1060,7 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
  * The caller must verify that the device is capable of generating PME# before
  * calling this function with @enable equal to 'true'.
  */
-static void pci_pme_active(struct pci_dev *dev, bool enable)
+void pci_pme_active(struct pci_dev *dev, bool enable)
 {
         u16 pmcsr;
@@ -1941,6 +1941,7 @@ EXPORT_SYMBOL(pci_set_power_state);
 EXPORT_SYMBOL(pci_save_state);
 EXPORT_SYMBOL(pci_restore_state);
 EXPORT_SYMBOL(pci_pme_capable);
+EXPORT_SYMBOL(pci_pme_active);
 EXPORT_SYMBOL(pci_enable_wake);
 EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
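With pci_pme_active() exported (and declared in pci.h further down), drivers can arm PME# themselves instead of open-coding PMCSR writes. A minimal sketch of a suspend hook using it, assuming a hypothetical mydev_priv with a wake-on-LAN flag; none of these names come from the merge:

#include <linux/pci.h>

/* Hypothetical per-device state; only the wake flag matters here. */
struct mydev_priv {
        bool wol_enabled;
};

static int mydev_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct mydev_priv *priv = pci_get_drvdata(pdev);

        pci_save_state(pdev);

        /* Per the kerneldoc, check PME# capability before enabling it. */
        if (priv->wol_enabled && pci_pme_capable(pdev, PCI_D3hot))
                pci_pme_active(pdev, true);

        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}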

drivers/pci/probe.c

@@ -52,27 +52,49 @@ EXPORT_SYMBOL(no_pci_devices);
  * Some platforms allow access to legacy I/O port and ISA memory space on
  * a per-bus basis. This routine creates the files and ties them into
  * their associated read, write and mmap files from pci-sysfs.c
+ *
+ * On error unwind, but don't propogate the error to the caller
+ * as it is ok to set up the PCI bus without these files.
  */
 static void pci_create_legacy_files(struct pci_bus *b)
 {
+        int error;
+
         b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
                                GFP_ATOMIC);
-        if (b->legacy_io) {
-                b->legacy_io->attr.name = "legacy_io";
-                b->legacy_io->size = 0xffff;
-                b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
-                b->legacy_io->read = pci_read_legacy_io;
-                b->legacy_io->write = pci_write_legacy_io;
-                device_create_bin_file(&b->dev, b->legacy_io);
+        if (!b->legacy_io)
+                goto kzalloc_err;
 
-                /* Allocated above after the legacy_io struct */
-                b->legacy_mem = b->legacy_io + 1;
-                b->legacy_mem->attr.name = "legacy_mem";
-                b->legacy_mem->size = 1024*1024;
-                b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
-                b->legacy_mem->mmap = pci_mmap_legacy_mem;
-                device_create_bin_file(&b->dev, b->legacy_mem);
-        }
+        b->legacy_io->attr.name = "legacy_io";
+        b->legacy_io->size = 0xffff;
+        b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
+        b->legacy_io->read = pci_read_legacy_io;
+        b->legacy_io->write = pci_write_legacy_io;
+        error = device_create_bin_file(&b->dev, b->legacy_io);
+        if (error)
+                goto legacy_io_err;
+
+        /* Allocated above after the legacy_io struct */
+        b->legacy_mem = b->legacy_io + 1;
+        b->legacy_mem->attr.name = "legacy_mem";
+        b->legacy_mem->size = 1024*1024;
+        b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
+        b->legacy_mem->mmap = pci_mmap_legacy_mem;
+        error = device_create_bin_file(&b->dev, b->legacy_mem);
+        if (error)
+                goto legacy_mem_err;
+
+        return;
+
+legacy_mem_err:
+        device_remove_bin_file(&b->dev, b->legacy_io);
+legacy_io_err:
+        kfree(b->legacy_io);
+        b->legacy_io = NULL;
+kzalloc_err:
+        printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
+               "and ISA memory resources to sysfs\n");
+        return;
 }
 
 void pci_remove_legacy_files(struct pci_bus *b)
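The same create-then-unwind idiom applies to any binary sysfs attribute, since device_create_bin_file() returns an errno on failure. A stripped-down sketch on an arbitrary struct device, using the 2.6.27-era bin_attribute read callback signature; the attribute name and functions are invented for illustration:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/stat.h>

static ssize_t example_read(struct kobject *kobj, struct bin_attribute *attr,
                            char *buf, loff_t off, size_t count)
{
        return 0;       /* nothing to report in this sketch */
}

static void example_create_blob(struct device *dev)
{
        struct bin_attribute *blob;
        int error;

        blob = kzalloc(sizeof(*blob), GFP_KERNEL);
        if (!blob)
                return;

        blob->attr.name = "example_blob";
        blob->attr.mode = S_IRUSR;
        blob->size = 4096;
        blob->read = example_read;

        error = device_create_bin_file(dev, blob);
        if (error) {
                dev_warn(dev, "could not create example_blob in sysfs\n");
                kfree(blob);
        }
}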

drivers/pci/quirks.c

@@ -1756,9 +1756,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c
  */
 static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
 {
-        /* Only disable the VPD capability for 5706, 5708, and 5709 rev. A */
+        /*
+         * Only disable the VPD capability for 5706, 5706S, 5708,
+         * 5708S and 5709 rev. A
+         */
         if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
+            (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
             (dev->device == PCI_DEVICE_ID_NX2_5708) ||
+            (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
             ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
              (dev->revision & 0xf0) == 0x0)) {
                 if (dev->vpd)
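The quirk only runs if it is hooked up through one of the DECLARE_PCI_FIXUP_* macros from <linux/pci.h>. A hedged sketch of such a registration; the FINAL phase and the PCI_ANY_ID wildcard are assumptions for illustration, not something shown in this hunk:

#include <linux/pci.h>

/*
 * Sketch only: run the quirk for every Broadcom device and let the
 * function filter on dev->device, as the code above already does.
 */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_ANY_ID,
                        quirk_brcm_570x_limit_vpd);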

include/linux/pci.h

@@ -641,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
+void pci_pme_active(struct pci_dev *dev, bool enable);
 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
 pci_power_t pci_target_state(struct pci_dev *dev);
 int pci_prepare_to_sleep(struct pci_dev *dev);
@@ -680,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus);
 /* Proper probing supporting hot-pluggable devices */
 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
                                        const char *mod_name);
-static inline int __must_check pci_register_driver(struct pci_driver *driver)
-{
-        return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME);
-}
+
+/*
+ * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define pci_register_driver(driver)             \
+        __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
 
 void pci_unregister_driver(struct pci_driver *dev);
 void pci_remove_behind_bridge(struct pci_dev *dev);
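Because the macro keeps the old one-argument call form, callers do not change; the point is simply that KBUILD_MODNAME now expands in the driver's own compilation unit. A minimal sketch of a PCI driver registering through it (the IDs 0x1234/0x5678, the names, and the probe/remove bodies are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder IDs; a real driver lists the hardware it supports. */
static const struct pci_device_id example_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int __devinit example_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void __devexit example_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
        .name           = "example",
        .id_table       = example_ids,
        .probe          = example_probe,
        .remove         = __devexit_p(example_remove),
};

static int __init example_init(void)
{
        /* Expands to __pci_register_driver(&example_driver, THIS_MODULE, KBUILD_MODNAME) */
        return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");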

include/linux/pci_ids.h

@@ -2177,8 +2177,6 @@
 #define PCI_DEVICE_ID_HERC_WIN          0x5732
 #define PCI_DEVICE_ID_HERC_UNI          0x5832
 
-#define PCI_VENDOR_ID_RDC               0x17f3
-
 #define PCI_VENDOR_ID_SITECOM           0x182d
 #define PCI_DEVICE_ID_SITECOM_DC105V2   0x3069
 

kernel/dma-coherent.c

@@ -92,7 +92,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
 /**
- * Try to allocate memory from the per-device coherent area.
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
  *
  * @dev:        device from which we allocate memory
  * @size:       size of requested memory area
@@ -100,11 +100,11 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @ret:        This pointer will be filled with the virtual address
  *              to allocated area.
  *
- * This function should be only called from per-arch %dma_alloc_coherent()
+ * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
  *
  * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
  */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                             dma_addr_t *dma_handle, void **ret)
@@ -126,7 +126,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 }
 
 /**
- * Try to free the memory allocated from per-device coherent memory pool.
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
  * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
@@ -135,7 +135,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
- * %dma_release_coherent() should proceed with releasing memory from
+ * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
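The return-value contract documented above is what an architecture's dma_alloc_coherent() is expected to honor: a non-zero return means the per-device pool answered the request and *ret is the result, zero means fall back to the generic allocator. A hedged sketch of such a call site; the function name and the simplified page-allocator fallback are illustrative, not code from this merge:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/io.h>

void *example_dma_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /* Per-device coherent pool first, per the kerneldoc above. */
        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        /* Simplified generic fallback; a real arch also handles caching attributes. */
        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret)
                *dma_handle = virt_to_phys(ret);
        return ret;
}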