ARM: i.MX: Fix SSI clock associations for i.MX25/i.MX35

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.12 (GNU/Linux)
 
 iQIcBAABCAAGBQJQTwn8AAoJEPFlmONMx+ezcuQP/iSJjSTecLvWH8c7cD9To+nv
 ItpQakHtabQZVEtQgfCb2uy8u1vKbIZcJ4FlqwDz6Q9AiV0SngXjHfR+PPO9mDuj
 4JmnF7ZnKtYcjBC/rhMAmiU+cVqrjY7WLwqz2FTEUc4OVC8m9xFIh0nhRSvQbngj
 MQdp4kBRriroaoF6ZgoanjhD/Xw0kEeERiAMbjlq7je+ZAZWYXj38tHg8BYp3Hrf
 ERVMXGoYgwglwNyooJHs5lAWTqKjSrXy+RAIQhV8yXOWE1Q+Jd7NE6ZOtn3VE20D
 8iO0WgjITi3xm+ewCzJT5UVcQmktGjpYTD5ePvbi0eilX1D8sL704QiOauJETcQq
 j4EHjgb+o53V9hyJt5Z3SWmJiyCsQRxlKagmbpJoRXWII08qR+Tgh2I8jgmCEAnw
 HTrA+IPZ1hFCUyQT7JZzDLak6nwOLQAc1S2H/GxyCOPhURZsFpLBdi9dqIyiMUdw
 KFw8T8GE5NgqXKB9osa3CGtjBfDtNWBTTRF+akQg9k1656HyWKxl24oSbX4kXsnC
 O9N4SSxjx7mL87elGV+jzIE61O2cTH5cTGA6Z7HAKQLAckLdF4+hd/NS8oOPtNOm
 K0r0EIt2U127I9OlpxvzosGjDgMzytGBP4NH0OmEovdvTX2Xh3uAyaiA/7rGWdoH
 /Tec1NLs0VbDv+jM2rKx
 =1cS3
 -----END PGP SIGNATURE-----

Merge tag 'imx-fixes' of git://git.pengutronix.de/git/imx/linux-2.6 into fixes

ARM: i.MX: Fix SSI clock associations for i.MX25/i.MX35

* tag 'imx-fixes' of git://git.pengutronix.de/git/imx/linux-2.6:
  ARM: clk-imx35: Fix SSI clock registration
  ARM: clk-imx25: Fix SSI clock registration
  + Linux 3.6-rc5
Olof Johansson 2012-09-12 22:00:07 -07:00
commit 2bc733e8b4
24 changed files with 253 additions and 46 deletions

Documentation/ABI/testing/sysfs-bus-pci

@@ -210,3 +210,15 @@ Users:
firmware assigned instance number of the PCI
device that can help in understanding the firmware
intended order of the PCI device.
What: /sys/bus/pci/devices/.../d3cold_allowed
Date: July 2012
Contact: Huang Ying <ying.huang@intel.com>
Description:
d3cold_allowed is a bit to control whether the corresponding PCI
device can be put into D3Cold state. If it is cleared, the
device will never be put into D3Cold state. If it is set, the
device may be put into D3Cold state if other requirements are
satisfied too. Reading this attribute will show the current
value of d3cold_allowed bit. Writing this attribute will set
the value of d3cold_allowed bit.
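
For illustration, a minimal userspace sketch of the attribute described above (not part of the patch; the device address is a placeholder):

/*
 * Userspace sketch only: disable D3cold for one device through the
 * new sysfs attribute. 0000:00:1c.0 is a placeholder BDF; substitute
 * the device you actually care about.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing "0" clears the bit (D3cold forbidden), "1" sets it. */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}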

Makefile

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*

arch/arm/Kconfig

@@ -6,7 +6,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
-select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION

arch/arm/include/asm/dma-mapping.h

@@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
/*
* This can be called during early boot to increase the size of the atomic
* coherent DMA pool above the default value of 256KiB. It must be called
* before postcore_initcall.
*/
extern void __init init_dma_coherent_pool_size(unsigned long size);
/*
* This can be called during boot to increase the size of the consistent
DMA region above its default value of 2MB. It must be called before the
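
As a sketch of the intended usage (hypothetical board code; the real in-tree caller is the Kirkwood change further down), a platform raises the pool size from its init_early hook:

/*
 * Sketch only: a hypothetical machine doubling the 256 KiB default.
 * Must run before postcore_initcall, e.g. from an .init_early hook.
 * Header locations for the SZ_* constants vary slightly across
 * kernel versions.
 */
#include <linux/init.h>
#include <linux/sizes.h>
#include <asm/dma-mapping.h>

static void __init myboard_init_early(void)
{
	init_dma_coherent_pool_size(SZ_512K);
}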

arch/arm/mach-imx/clk-imx25.c

@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");

arch/arm/mach-imx/clk-imx35.c

@@ -230,10 +230,8 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
/* i.mx35 has the i.mx21 type uart */
clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");

arch/arm/mach-kirkwood/common.c

@@ -517,6 +517,13 @@ void __init kirkwood_init_early(void)
void __init kirkwood_init_early(void)
{
orion_time_set_base(TIMER_VIRT_BASE);
/*
* Some Kirkwood devices allocate their coherent buffers from atomic
* context. Increase the size of the atomic coherent pool to make sure
* such allocations won't fail.
*/
init_dma_coherent_pool_size(SZ_1M);
}
int kirkwood_tclk;

arch/arm/mm/dma-mapping.c

@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
vunmap(cpu_addr);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
struct dma_pool {
size_t size;
spinlock_t lock;
unsigned long *bitmap;
unsigned long nr_pages;
void *vaddr;
struct page *page;
struct page **pages;
};
static struct dma_pool atomic_pool = {
-.size = SZ_256K,
+.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};
static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
void __init init_dma_coherent_pool_size(unsigned long size)
{
/*
* Catch any attempt to set the pool size too late.
*/
BUG_ON(atomic_pool.vaddr);
/*
* Set architecture specific coherent pool size only if
* it has not been changed by kernel command line parameter.
*/
if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
atomic_pool.size = size;
}
/*
* Initialise the coherent pool for atomic allocations.
*/
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
struct page **pages;
void *ptr;
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
if (!bitmap)
goto no_bitmap;
pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto no_pages;
if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
&page, NULL);
if (ptr) {
int i;
for (i = 0; i < nr_pages; i++)
pages[i] = page + i;
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
pool->page = page;
pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)pool->size / 1024);
return 0;
}
no_pages:
kfree(bitmap);
no_bitmap:
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
-*ret_page = pool->page + pageno;
+*ret_page = pool->pages[pageno];
} else {
pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
"Please increase it with coherent_pool= kernel parameter!\n",
(unsigned)pool->size / 1024);
}
spin_unlock_irqrestore(&pool->lock, flags);
return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
struct dma_pool *pool = &atomic_pool;
void *end = start + size;
void *pool_start = pool->vaddr;
void *pool_end = pool->vaddr + pool->size;
if (start < pool_start || start > pool_end)
return false;
if (end <= pool_end)
return true;
WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
start, end - 1, pool_start, pool_end - 1);
return false;
}
static int __free_from_pool(void *start, size_t size)
{
struct dma_pool *pool = &atomic_pool;
unsigned long pageno, count;
unsigned long flags;
-if (start < pool->vaddr || start > pool->vaddr + pool->size)
+if (!__in_atomic_pool(start, size))
return 0;
-if (start + size > pool->vaddr + pool->size) {
-WARN(1, "freeing wrong coherent size from pool\n");
-return 0;
-}
pageno = (start - pool->vaddr) >> PAGE_SHIFT;
count = size >> PAGE_SHIFT;
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
struct dma_pool *pool = &atomic_pool;
struct page **pages = pool->pages;
int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
return pages + offs;
}
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return cpu_addr;
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
dma_addr_t *handle)
{
struct page *page;
void *addr;
addr = __alloc_from_pool(size, &page);
if (!addr)
return NULL;
*handle = __iommu_create_mapping(dev, &page, size);
if (*handle == DMA_ERROR_CODE)
goto err_mapping;
return addr;
err_mapping:
__free_from_pool(addr, size);
return NULL;
}
static void __iommu_free_atomic(struct device *dev, struct page **pages,
dma_addr_t handle, size_t size)
{
__iommu_remove_mapping(dev, handle, size);
__free_from_pool(page_address(pages[0]), size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
if (__in_atomic_pool(cpu_addr, size)) {
__iommu_free_atomic(dev, pages, handle, size);
return;
}
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
unmap_kernel_range((unsigned long)cpu_addr, size);
vunmap(cpu_addr);
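
Taken together, these hunks mean a non-blocking coherent allocation is served from the preallocated pool, now on the IOMMU path as well. A hedged sketch of the kind of caller this serves (hypothetical driver code, not part of the patch):

/*
 * Hypothetical caller sketch: allocating coherent memory where
 * sleeping is not allowed. GFP_ATOMIC routes the request through
 * the atomic pool set up above instead of the page allocator.
 */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void *alloc_ring_atomic(struct device *dev, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, SZ_4K, dma, GFP_ATOMIC);
}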

arch/x86/xen/mmu.c

@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = start;
}

arch/x86/xen/p2m.c

@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
if (p2m_index(set_pfn))
return false;
-for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
topidx = p2m_top_index(pfn);
if (!p2m_top[topidx])

drivers/base/dma-contiguous.c

@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
return -EINVAL;
/* Sanitise input arguments */
-alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);

drivers/hid/hid-core.c

@@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
struct hid_driver *hdrv = hid->driver;
int ret;
-hid_dump_input(hid, usage, value);
+if (!list_empty(&hid->debug_list))
+hid_dump_input(hid, usage, value);
if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
ret = hdrv->event(hid, field, usage, value);
@@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
+{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },

drivers/hid/usbhid/hid-quirks.c

@@ -70,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },

drivers/input/keyboard/imx_keypad.c

@@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
/* Inhibit KDI and KRI interrupts. */
reg_val = readw(keypad->mmio_base + KPSR);
reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE);
reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD;
writew(reg_val, keypad->mmio_base + KPSR);
/* Columns as open drain and disable all rows */
@@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, keypad);
/* Ensure that the keypad will stay dormant until opened */
clk_enable(keypad->clk);
imx_keypad_inhibit(keypad);
clk_disable(keypad->clk);
error = request_irq(irq, imx_keypad_irq_handler, 0,
pdev->name, keypad);

drivers/input/serio/i8042-x86ia64io.h

@@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
},
},
{
/* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
},
},
{
/* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),

drivers/input/tablet/wacom_wac.c

@@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A =
{ "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF8 =
{ "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
@@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEF) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
{ USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }

drivers/input/touchscreen/edt-ft5x06.c

@@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
if (tsdata->debug_dir)
debugfs_remove_recursive(tsdata->debug_dir);
+kfree(tsdata->raw_buffer);
}
#else
@@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
if (gpio_is_valid(pdata->reset_pin))
gpio_free(pdata->reset_pin);
-kfree(tsdata->raw_buffer);
kfree(tsdata);
return 0;

drivers/pci/pci-driver.c

@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi)
{
struct drv_dev_and_id *ddi = _ddi;
struct device *dev = &ddi->dev->dev;
struct device *parent = dev->parent;
int rc;
/* The parent bridge must be in active state when probing */
if (parent)
pm_runtime_get_sync(parent);
/* Unbound PCI devices are always set to disabled and suspended.
* During probe, the device is set to enabled and active and the
* usage count is incremented. If the driver supports runtime PM,
@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi)
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
}
if (parent)
pm_runtime_put(parent);
return rc;
}

drivers/pci/pci-sysfs.c

@@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
}
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
static void
pci_config_pm_runtime_get(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
if (parent)
pm_runtime_get_sync(parent);
pm_runtime_get_noresume(dev);
/*
* pdev->current_state is set to PCI_D3cold during suspending,
* so wait until suspending completes
*/
pm_runtime_barrier(dev);
/*
* Only need to resume devices in D3cold, because config
* registers are still accessible for devices suspended but
* not in D3cold.
*/
if (pdev->current_state == PCI_D3cold)
pm_runtime_resume(dev);
}
static void
pci_config_pm_runtime_put(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
pm_runtime_put(dev);
if (parent)
pm_runtime_put_sync(parent);
}
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
size = count;
}
pci_config_pm_runtime_get(dev);
if ((off & 1) && size) {
u8 val;
pci_user_read_config_byte(dev, off, &val);
@@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
--size;
}
pci_config_pm_runtime_put(dev);
return count;
}
@@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
count = size;
}
pci_config_pm_runtime_get(dev);
if ((off & 1) && size) {
pci_user_write_config_byte(dev, off, data[off - init_off]);
off++;
@@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
--size;
}
pci_config_pm_runtime_put(dev);
return count;
}
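
Any further config-space accessor in this file would bracket its accesses the same way; a hedged sketch with a hypothetical helper (not part of the patch):

/*
 * Hypothetical helper, for illustration only: the get/put pair wakes
 * a device out of D3cold long enough for the config read to be valid.
 */
static u16 pci_sysfs_read_vendor(struct pci_dev *pdev)
{
	u16 vendor;

	pci_config_pm_runtime_get(pdev);
	pci_user_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	pci_config_pm_runtime_put(pdev);
	return vendor;
}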

drivers/pci/pci.c

@@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
dev->d3cold_allowed = true;
dev->d1_support = false;
dev->d2_support = false;

drivers/pci/pcie/portdrv_pci.c

@@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev)
{
return 0;
}
static int pcie_port_runtime_idle(struct device *dev)
{
/* Delay for a short while to prevent too frequent suspend/resume */
pm_schedule_suspend(dev, 10);
return -EBUSY;
}
#else
#define pcie_port_runtime_suspend NULL
#define pcie_port_runtime_resume NULL
#define pcie_port_runtime_idle NULL
#endif
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
@@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.resume_noirq = pcie_port_resume_noirq,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
return status;
pci_save_state(dev);
/*
* D3cold may not work properly on some PCIe ports, so disable
* it by default.
*/
dev->d3cold_allowed = false;
if (!pci_match_id(port_runtime_pm_black_list, dev))
pm_runtime_put_noidle(&dev->dev);

drivers/pci/probe.c

@@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
case PCI_BASE_ADDRESS_MEM_TYPE_32:
break;
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
-dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
+/* 1M mem BAR treated as 32-bit BAR */
break;
case PCI_BASE_ADDRESS_MEM_TYPE_64:
flags |= IORESOURCE_MEM_64;
break;
default:
-dev_warn(&dev->dev,
-"mem unknown type %x treated as 32-bit BAR\n",
-mem_type);
+/* mem unknown type treated as 32-bit BAR */
break;
}
return flags;
@@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u32 l, sz, mask;
u16 orig_cmd;
struct pci_bus_region region;
+bool bar_too_big = false, bar_disabled = false;
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+/* No printks while decoding is disabled! */
if (!dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
pci_write_config_word(dev, PCI_COMMAND,
@@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
goto fail;
if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
-dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
-pos);
+bar_too_big = true;
goto fail;
}
@@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
region.start = 0;
region.end = sz64;
pcibios_bus_to_resource(dev, res, &region);
+bar_disabled = true;
} else {
region.start = l64;
region.end = l64 + sz64;
pcibios_bus_to_resource(dev, res, &region);
-dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
-pos, res);
}
} else {
sz = pci_size(l, sz, mask);
@@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
region.start = l;
region.end = l + sz;
pcibios_bus_to_resource(dev, res, &region);
-dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
-out:
+goto out;
+fail:
+res->flags = 0;
+out:
if (!dev->mmio_always_on)
pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+if (bar_too_big)
+dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
+if (res->flags && !bar_disabled)
+dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
-fail:
-res->flags = 0;
-goto out;
}
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)

drivers/xen/swiotlb-xen.c

@@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
return ret;
-if (hwdev && hwdev->coherent_dma_mask)
-dma_mask = hwdev->coherent_dma_mask;
+dma_mask = dma_alloc_coherent_mask(hwdev, flags);
phys = virt_to_phys(ret);
dev_addr = xen_phys_to_bus(phys);

drivers/xen/xen-pciback/pci_stub.c

@@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
if (err)
goto config_release;
dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
__pci_reset_function_locked(dev);
/* We need the device active to save the state. */
dev_dbg(&dev->dev, "save state of device\n");
pci_save_state(dev);
dev_data->pci_saved_state = pci_store_saved_state(dev);
if (!dev_data->pci_saved_state)
dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
+else {
+dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
+__pci_reset_function_locked(dev);
+}
/* Now disable the device (this also ensures some private device
* data is setup before we export)
*/