Merge branches 'arm/mediatek', 'arm/smmu', 'x86/amd', 's390', 'core' and 'arm/exynos' into next

Joerg Roedel 2016-12-06 17:32:16 +01:00
49 changed files with 2624 additions and 455 deletions

View File

@ -0,0 +1,12 @@
What: /sys/devices/.../deferred_probe
Date: August 2016
Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
Description:
The /sys/devices/.../deferred_probe attribute is
present for all devices. If, while probing a device, a
driver detects that a related device is not yet ready,
it may defer probing of the first device. The
kernel will retry probing the first device after any
other device is successfully probed. This attribute
reads as 1 if probing of this device is currently
deferred, or 0 otherwise.
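
A minimal userspace sketch (illustrative only; the device path below is made up) showing how the attribute can be read:

#include <stdio.h>

int main(void)
{
	/* Substitute the sysfs path of the device of interest. */
	const char *path = "/sys/devices/platform/example.0/deferred_probe";
	FILE *f = fopen(path, "r");
	int deferred = 0;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &deferred) == 1)
		printf("probe %s deferred\n", deferred ? "is" : "is not");
	fclose(f);
	return 0;
}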

View File

@ -796,6 +796,8 @@ static struct dma_map_ops iommu_dma_ops = {
.sync_single_for_device = __iommu_sync_single_for_device,
.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.dma_supported = iommu_dma_supported,
.mapping_error = iommu_dma_mapping_error,
};

View File

@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
this_cpu_ci->cpu_map_populated = true;
return 0;
}

View File

@ -19,8 +19,17 @@
#define pr_fmt(fmt) "ACPI: IORT: " fmt
#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define IORT_TYPE_MASK(type) (1 << (type))
#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
(1 << ACPI_IORT_NODE_SMMU_V3))
struct iort_its_msi_chip {
struct list_head list;
@ -28,6 +37,90 @@ struct iort_its_msi_chip {
u32 translation_id;
};
struct iort_fwnode {
struct list_head list;
struct acpi_iort_node *iort_node;
struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);
/**
* iort_set_fwnode() - Create iort_fwnode and use it to register
* iommu data in the iort_fwnode_list
*
* @iort_node: IORT table node associated with the IOMMU
* @fwnode: fwnode associated with the IORT node
*
* Returns: 0 on success
* <0 on failure
*/
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
struct fwnode_handle *fwnode)
{
struct iort_fwnode *np;
np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
if (WARN_ON(!np))
return -ENOMEM;
INIT_LIST_HEAD(&np->list);
np->iort_node = iort_node;
np->fwnode = fwnode;
spin_lock(&iort_fwnode_lock);
list_add_tail(&np->list, &iort_fwnode_list);
spin_unlock(&iort_fwnode_lock);
return 0;
}
/**
* iort_get_fwnode() - Retrieve fwnode associated with an IORT node
*
* @node: IORT table node to be looked-up
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
struct iort_fwnode *curr;
struct fwnode_handle *fwnode = NULL;
spin_lock(&iort_fwnode_lock);
list_for_each_entry(curr, &iort_fwnode_list, list) {
if (curr->iort_node == node) {
fwnode = curr->fwnode;
break;
}
}
spin_unlock(&iort_fwnode_lock);
return fwnode;
}
/**
* iort_delete_fwnode() - Delete fwnode associated with an IORT node
*
* @node: IORT table node associated with fwnode to delete
*/
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
struct iort_fwnode *curr, *tmp;
spin_lock(&iort_fwnode_lock);
list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
if (curr->iort_node == node) {
list_del(&curr->list);
kfree(curr);
break;
}
}
spin_unlock(&iort_fwnode_lock);
}
typedef acpi_status (*iort_find_node_callback)
(struct acpi_iort_node *node, void *context);
@ -141,6 +234,21 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
return NULL;
}
static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
return AE_OK;
}
bool iort_node_match(u8 type)
{
struct acpi_iort_node *node;
node = iort_scan_node(type, iort_match_type_callback, NULL);
return node != NULL;
}
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
void *context)
{
@ -212,9 +320,48 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return 0;
}
static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
u32 *id_out, u8 type_mask,
int index)
{
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
if (!node->mapping_offset || !node->mapping_count ||
index >= node->mapping_count)
return NULL;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
node->mapping_offset);
/* Firmware bug! */
if (!map->output_reference) {
pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
node, node->type);
return NULL;
}
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
if (!(IORT_TYPE_MASK(parent->type) & type_mask))
return NULL;
if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
*id_out = map[index].output_base;
return parent;
}
}
return NULL;
}
static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
u32 rid_in, u32 *rid_out,
u8 type)
u8 type_mask)
{
u32 rid = rid_in;
@ -223,7 +370,7 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
struct acpi_iort_id_mapping *map;
int i;
if (node->type == type) {
if (IORT_TYPE_MASK(node->type) & type_mask) {
if (rid_out)
*rid_out = rid;
return node;
@ -296,7 +443,7 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
if (!node)
return req_id;
iort_node_map_rid(node, req_id, &dev_id, ACPI_IORT_NODE_ITS_GROUP);
iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
@ -318,7 +465,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
node = iort_node_map_rid(node, req_id, NULL, ACPI_IORT_NODE_ITS_GROUP);
node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@ -356,13 +503,459 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
u32 *rid = data;
*rid = alias;
return 0;
}
static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
int ret = iommu_fwspec_init(dev, fwnode, ops);
if (!ret)
ret = iommu_fwspec_add_ids(dev, &streamid, 1);
return ret;
}
static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
struct acpi_iort_node *node,
u32 streamid)
{
const struct iommu_ops *ops = NULL;
int ret = -ENODEV;
struct fwnode_handle *iort_fwnode;
if (node) {
iort_fwnode = iort_get_fwnode(node);
if (!iort_fwnode)
return NULL;
ops = iommu_get_instance(iort_fwnode);
if (!ops)
return NULL;
ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}
return ret ? NULL : ops;
}
/**
* iort_set_dma_mask - Set-up dma mask for a device.
*
* @dev: device to configure
*/
void iort_set_dma_mask(struct device *dev)
{
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
* setup the correct supported mask.
*/
if (!dev->coherent_dma_mask)
dev->coherent_dma_mask = DMA_BIT_MASK(32);
/*
* Set it to coherent_dma_mask by default if the architecture
* code has not set it.
*/
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
}
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
* @dev: device to configure
*
* Returns: iommu_ops pointer on configuration success
* NULL on configuration failure
*/
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
struct acpi_iort_node *node, *parent;
const struct iommu_ops *ops = NULL;
u32 streamid = 0;
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
u32 rid;
pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
&rid);
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return NULL;
parent = iort_node_map_rid(node, rid, &streamid,
IORT_IOMMU_TYPE);
ops = iort_iommu_xlate(dev, parent, streamid);
} else {
int i = 0;
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return NULL;
parent = iort_node_get_id(node, &streamid,
IORT_IOMMU_TYPE, i++);
while (parent) {
ops = iort_iommu_xlate(dev, parent, streamid);
parent = iort_node_get_id(node, &streamid,
IORT_IOMMU_TYPE, i++);
}
}
return ops;
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
int trigger,
struct resource *res)
{
int irq = acpi_register_gsi(NULL, hwirq, trigger,
ACPI_ACTIVE_HIGH);
if (irq <= 0) {
pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
name);
return;
}
res->start = irq;
res->end = irq;
res->flags = IORESOURCE_IRQ;
res->name = name;
}
static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
/* Always present mem resource */
int num_res = 1;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->event_gsiv)
num_res++;
if (smmu->pri_gsiv)
num_res++;
if (smmu->gerr_gsiv)
num_res++;
if (smmu->sync_gsiv)
num_res++;
return num_res;
}
static void __init arm_smmu_v3_init_resources(struct resource *res,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
int num_res = 0;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
res[num_res].start = smmu->base_address;
res[num_res].end = smmu->base_address + SZ_128K - 1;
res[num_res].flags = IORESOURCE_MEM;
num_res++;
if (smmu->event_gsiv)
acpi_iort_register_irq(smmu->event_gsiv, "eventq",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
if (smmu->pri_gsiv)
acpi_iort_register_irq(smmu->pri_gsiv, "priq",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
if (smmu->gerr_gsiv)
acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
if (smmu->sync_gsiv)
acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
}
static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
/*
* Only consider the global fault interrupt and ignore the
* configuration access interrupt.
*
* MMIO address and global fault interrupt resources are always
* present so add them to the context interrupt count as a static
* value.
*/
return smmu->context_interrupt_count + 2;
}
static void __init arm_smmu_init_resources(struct resource *res,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
int i, hw_irq, trigger, num_res = 0;
u64 *ctx_irq, *glb_irq;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
res[num_res].start = smmu->base_address;
res[num_res].end = smmu->base_address + smmu->span - 1;
res[num_res].flags = IORESOURCE_MEM;
num_res++;
glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
/* Global IRQs */
hw_irq = IORT_IRQ_MASK(glb_irq[0]);
trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
&res[num_res++]);
/* Context IRQs */
ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
for (i = 0; i < smmu->context_interrupt_count; i++) {
hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
&res[num_res++]);
}
}
static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}
struct iort_iommu_config {
const char *name;
int (*iommu_init)(struct acpi_iort_node *node);
bool (*iommu_is_coherent)(struct acpi_iort_node *node);
int (*iommu_count_resources)(struct acpi_iort_node *node);
void (*iommu_init_resources)(struct resource *res,
struct acpi_iort_node *node);
};
static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
.iommu_is_coherent = arm_smmu_v3_is_coherent,
.iommu_count_resources = arm_smmu_v3_count_resources,
.iommu_init_resources = arm_smmu_v3_init_resources
};
static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
.iommu_is_coherent = arm_smmu_is_coherent,
.iommu_count_resources = arm_smmu_count_resources,
.iommu_init_resources = arm_smmu_init_resources
};
static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
return &iort_arm_smmu_v3_cfg;
case ACPI_IORT_NODE_SMMU:
return &iort_arm_smmu_cfg;
default:
return NULL;
}
}
/**
* iort_add_smmu_platform_device() - Allocate a platform device for SMMU
* @node: Pointer to SMMU ACPI IORT node
*
* Returns: 0 on success, <0 failure
*/
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
enum dev_dma_attr attr;
int ret, count;
const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);
if (!ops)
return -ENODEV;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
count = ops->iommu_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
if (!r) {
ret = -ENOMEM;
goto dev_put;
}
ops->iommu_init_resources(r, node);
ret = platform_device_add_resources(pdev, r, count);
/*
* Resources are duplicated in platform_device_add_resources,
* free their allocated memory
*/
kfree(r);
if (ret)
goto dev_put;
/*
* Add a copy of IORT node pointer to platform_data to
* be used to retrieve IORT data information.
*/
ret = platform_device_add_data(pdev, &node, sizeof(node));
if (ret)
goto dev_put;
/*
* We expect the dma masks to be equivalent for
* all SMMU set-ups
*/
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
fwnode = iort_get_fwnode(node);
if (!fwnode) {
ret = -ENODEV;
goto dev_put;
}
pdev->dev.fwnode = fwnode;
attr = ops->iommu_is_coherent(node) ?
DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
/* Configure DMA for the page table walker */
acpi_dma_configure(&pdev->dev, attr);
ret = platform_device_add(pdev);
if (ret)
goto dma_deconfigure;
return 0;
dma_deconfigure:
acpi_dma_deconfigure(&pdev->dev);
dev_put:
platform_device_put(pdev);
return ret;
}
static void __init iort_init_platform_devices(void)
{
struct acpi_iort_node *iort_node, *iort_end;
struct acpi_table_iort *iort;
struct fwnode_handle *fwnode;
int i, ret;
/*
* iort_table and iort both point to the start of IORT table, but
* have different struct types
*/
iort = (struct acpi_table_iort *)iort_table;
/* Get the first IORT node */
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort->node_offset);
iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort_table->length);
for (i = 0; i < iort->node_count; i++) {
if (iort_node >= iort_end) {
pr_err("iort node pointer overflows, bad table\n");
return;
}
if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
fwnode = acpi_alloc_fwnode_static();
if (!fwnode)
return;
iort_set_fwnode(iort_node, fwnode);
ret = iort_add_smmu_platform_device(iort_node);
if (ret) {
iort_delete_fwnode(iort_node);
acpi_free_fwnode_static(fwnode);
return;
}
}
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
iort_node->length);
}
}
void __init acpi_iort_init(void)
{
acpi_status status;
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
const char *msg = acpi_format_exception(status);
pr_err("Failed to get table, %s\n", msg);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
const char *msg = acpi_format_exception(status);
pr_err("Failed to get table, %s\n", msg);
}
return;
}
iort_init_platform_devices();
acpi_probe_device_table(iort);
}

View File

@ -227,8 +227,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
attr = acpi_get_dma_attr(acpi_dev);
if (attr != DEV_DMA_NOT_SUPPORTED)
arch_setup_dma_ops(dev, 0, 0, NULL,
attr == DEV_DMA_COHERENT);
acpi_dma_configure(dev, attr);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@ -251,6 +250,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
return 0;
err:
acpi_dma_deconfigure(dev);
ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
put_device(&acpi_dev->dev);

View File

@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
@ -1370,6 +1371,38 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NON_COHERENT;
}
/**
* acpi_dma_configure - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
*/
void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
iort_set_dma_mask(dev);
iommu = iort_iommu_configure(dev);
/*
* Assume dma valid range starts at 0 and covers the whole
* coherent_dma_mask.
*/
arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
attr == DEV_DMA_COHERENT);
}
EXPORT_SYMBOL_GPL(acpi_dma_configure);
/**
* acpi_dma_deconfigure - Tear-down DMA configuration for the device.
* @dev: The pointer to the device
*/
void acpi_dma_deconfigure(struct device *dev)
{
arch_teardown_dma_ops(dev);
}
EXPORT_SYMBOL_GPL(acpi_dma_deconfigure);
static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;

View File

@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
source "drivers/base/test/Kconfig"
config SYS_HYPERVISOR
bool
default n

View File

@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-y += test/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG

View File

@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
extern void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
extern struct device_attribute dev_attr_deferred_probe;
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
/* Device links support */
extern int device_links_read_lock(void);
extern void device_links_read_unlock(int idx);
extern int device_links_check_suppliers(struct device *dev);
extern void device_links_driver_bound(struct device *dev);
extern void device_links_driver_cleanup(struct device *dev);
extern void device_links_no_driver(struct device *dev);
extern bool device_links_busy(struct device *dev);
extern void device_links_unbind_consumers(struct device *dev);

View File

@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
const char *line_size_props[2];
const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
{
.size_prop = "cache-size",
.line_size_props = { "cache-line-size",
"cache-block-size", },
.nr_sets_prop = "cache-sets",
}, {
.size_prop = "i-cache-size",
.line_size_props = { "i-cache-line-size",
"i-cache-block-size", },
.nr_sets_prop = "i-cache-sets",
}, {
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
if (type == CACHE_TYPE_UNIFIED)
return 0;
return type;
}
static void cache_size(struct cacheinfo *this_leaf)
{
const char *propname;
const __be32 *cache_size;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
cache_size = of_get_property(this_leaf->of_node, propname, NULL);
if (cache_size)
this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
const __be32 *line_size;
int i, lim, ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
for (i = 0; i < lim; i++) {
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
line_size = of_get_property(this_leaf->of_node, propname, NULL);
if (line_size)
break;
}
if (line_size)
this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf)
{
const char *propname;
const __be32 *nr_sets;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
if (nr_sets)
this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
static void cache_associativity(struct cacheinfo *this_leaf)
{
unsigned int line_size = this_leaf->coherency_line_size;
unsigned int nr_sets = this_leaf->number_of_sets;
unsigned int size = this_leaf->size;
/*
* If the cache is fully associative, there is no need to
* check the other properties.
*/
if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
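/*
 * Worked example (illustrative numbers, not taken from any particular
 * device tree): a cache described with cache-size = 32768,
 * cache-sets = 128 and cache-line-size = 64 is reported as
 * (32768 / 128) / 64 = 4 ways of associativity. cache-sets = 1 marks a
 * fully associative cache, so the calculation is skipped in that case.
 */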
static void cache_of_override_properties(unsigned int cpu)
{
int index;
struct cacheinfo *this_leaf;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = this_cpu_ci->info_list + index;
cache_size(this_leaf);
cache_get_line_size(this_leaf);
cache_nr_sets(this_leaf);
cache_associativity(this_leaf);
}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
int ret;
int ret = 0;
ret = cache_setup_of_node(cpu);
if (this_cpu_ci->cpu_map_populated)
return 0;
if (of_have_populated_dt())
ret = cache_setup_of_node(cpu);
else if (!acpi_disabled)
/* No cache property/hierarchy support yet in ACPI */
ret = -ENOTSUPP;
if (ret)
return ret;
@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
static void cache_override_properties(unsigned int cpu)
{
if (of_have_populated_dt())
return cache_of_override_properties(cpu);
}
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
cpu);
pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
cache_override_properties(cpu);
return 0;
free_ci:

View File

@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
/* Device links support. */
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
static inline void device_links_write_lock(void)
{
mutex_lock(&device_links_lock);
}
static inline void device_links_write_unlock(void)
{
mutex_unlock(&device_links_lock);
}
int device_links_read_lock(void)
{
return srcu_read_lock(&device_links_srcu);
}
void device_links_read_unlock(int idx)
{
srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);
static inline void device_links_write_lock(void)
{
down_write(&device_links_lock);
}
static inline void device_links_write_unlock(void)
{
up_write(&device_links_lock);
}
int device_links_read_lock(void)
{
down_read(&device_links_lock);
return 0;
}
void device_links_read_unlock(int not_used)
{
up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
* @target: Device to check against.
*
* Check if @target depends on @dev or any device dependent on it (its child or
* its consumer etc). Return 1 if that is the case or 0 otherwise.
*/
static int device_is_dependent(struct device *dev, void *target)
{
struct device_link *link;
int ret;
if (WARN_ON(dev == target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
if (ret)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (WARN_ON(link->consumer == target))
return 1;
ret = device_is_dependent(link->consumer, target);
if (ret)
break;
}
return ret;
}
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
struct device_link *link;
/*
* Devices that have not been registered yet will be put to the ends
* of the lists during the registration, so skip them here.
*/
if (device_is_registered(dev))
devices_kset_move_last(dev);
if (device_pm_initialized(dev))
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node)
device_reorder_to_tail(link->consumer, NULL);
return 0;
}
/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
* DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
* be forced into the active metastate and reference-counted upon the creation
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
* If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
* when the consumer device driver unbinds from it. The combination of both
* DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
* to be returned.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
*
* The supplier device is required to be registered when this function is called
* and NULL will be returned if that is not the case. The consumer device need
not be registered, however.
*/
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags)
{
struct device_link *link;
if (!consumer || !supplier ||
((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
return NULL;
device_links_write_lock();
device_pm_lock();
/*
* If the supplier has not been fully registered yet or there is a
* reverse dependency between the consumer and the supplier already in
* the graph, return NULL.
*/
if (!device_pm_initialized(supplier)
|| device_is_dependent(consumer, supplier)) {
link = NULL;
goto out;
}
list_for_each_entry(link, &supplier->links.consumers, s_node)
if (link->consumer == consumer)
goto out;
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
goto out;
if (flags & DL_FLAG_PM_RUNTIME) {
if (flags & DL_FLAG_RPM_ACTIVE) {
if (pm_runtime_get_sync(supplier) < 0) {
pm_runtime_put_noidle(supplier);
kfree(link);
link = NULL;
goto out;
}
link->rpm_active = true;
}
pm_runtime_new_link(consumer);
}
get_device(supplier);
link->supplier = supplier;
INIT_LIST_HEAD(&link->s_node);
get_device(consumer);
link->consumer = consumer;
INIT_LIST_HEAD(&link->c_node);
link->flags = flags;
/* Determine the initial link state. */
if (flags & DL_FLAG_STATELESS) {
link->status = DL_STATE_NONE;
} else {
switch (supplier->links.status) {
case DL_DEV_DRIVER_BOUND:
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
* Balance the decrementation of the supplier's
* runtime PM usage counter after consumer probe
* in driver_probe_device().
*/
if (flags & DL_FLAG_PM_RUNTIME)
pm_runtime_get_sync(supplier);
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
link->status = DL_STATE_ACTIVE;
break;
default:
link->status = DL_STATE_AVAILABLE;
break;
}
break;
case DL_DEV_UNBINDING:
link->status = DL_STATE_SUPPLIER_UNBIND;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
}
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
*
* It is necessary to hold dpm_list locked throughout all that or else
* we may end up suspending with a wrong ordering of it.
*/
device_reorder_to_tail(consumer, NULL);
list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
out:
device_pm_unlock();
device_links_write_unlock();
return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
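/*
 * Usage sketch (not part of this change): a hypothetical consumer driver
 * creating a managed link to its supplier during probe. Only
 * device_link_add() and the DL_FLAG_* values are taken from this file;
 * the probe wrapper and the way the supplier pointer is obtained are
 * assumptions for illustration.
 */
static int example_consumer_probe(struct device *dev, struct device *supplier)
{
	struct device_link *link;

	/*
	 * Runtime PM will follow the link, and the link is dropped
	 * automatically when this consumer unbinds (DL_FLAG_AUTOREMOVE).
	 */
	link = device_link_add(dev, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -ENODEV; /* supplier unregistered or dependency loop */

	return 0;
}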
static void device_link_free(struct device_link *link)
{
put_device(link->consumer);
put_device(link->supplier);
kfree(link);
}
#ifdef CONFIG_SRCU
static void __device_link_free_srcu(struct rcu_head *rhead)
{
device_link_free(container_of(rhead, struct device_link, rcu_head));
}
static void __device_link_del(struct device_link *link)
{
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_drop_link(link->consumer);
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else /* !CONFIG_SRCU */
static void __device_link_del(struct device_link *link)
{
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
list_del(&link->s_node);
list_del(&link->c_node);
device_link_free(link);
}
#endif /* !CONFIG_SRCU */
/**
* device_link_del - Delete a link between two devices.
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
* PM.
*/
void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_pm_lock();
__device_link_del(link);
device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
list_for_each_entry(link, &dev->links.suppliers, c_node)
if (link->status == DL_STATE_CONSUMER_PROBE)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
*
* Check links from this device to any suppliers. Walk the list of the device's
* links to suppliers and see if all of them are available. If not, simply
* return -EPROBE_DEFER.
*
* We need to guarantee that the supplier will not go away after the check has
* been positive here. It only can go away in __device_release_driver() and
* that function checks the device's links to consumers. This means we need to
* mark the link as "consumer probe in progress" to make the supplier removal
* wait for us to complete (or bad things may happen).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
int ret = 0;
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->status != DL_STATE_AVAILABLE) {
device_links_missing_supplier(dev);
ret = -EPROBE_DEFER;
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
}
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
return ret;
}
/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
* The probe has been successful, so update links from this device to any
* consumers by changing their status to "available".
*
* Also change the status of @dev's links to suppliers to "active".
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_driver_bound(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
}
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a driver.
*
* Delete all non-persistent links from this device to any suppliers.
*
* Persistent links stay around, but their status is changed to "available",
* unless they already are in the "supplier unbind in progress" state in which
* case they need not be updated.
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
static void __device_links_no_driver(struct device *dev)
{
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->flags & DL_FLAG_AUTOREMOVE)
__device_link_del(link);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
dev->links.status = DL_DEV_NO_DRIVER;
}
void device_links_no_driver(struct device *dev)
{
device_links_write_lock();
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_driver_cleanup - Update links after driver removal.
* @dev: Device whose driver has just gone away.
*
* Update links to consumers for @dev by changing their status to "dormant" and
* invoke %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_driver_cleanup(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_busy - Check if there are any busy links to consumers.
* @dev: Device to check.
*
* Check each consumer of the device and return 'true' if its link's status
* is one of "consumer probe" or "active" (meaning that the given consumer is
* probing right now or its driver is present). Otherwise, change the link
* state to "supplier unbind" to prevent the consumer from being probed
* successfully going forward.
*
* Return 'false' if there are no probing or active consumers.
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
bool device_links_busy(struct device *dev)
{
struct device_link *link;
bool ret = false;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
|| link->status == DL_STATE_ACTIVE) {
ret = true;
break;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
}
dev->links.status = DL_DEV_UNBINDING;
device_links_write_unlock();
return ret;
}
/**
* device_links_unbind_consumers - Force unbind consumers of the given device.
* @dev: Device to unbind the consumers of.
*
* Walk the list of links to consumers for @dev and if any of them is in the
* "consumer probe" state, wait for all device probes in progress to complete
* and start over.
*
* If that's not the case, change the status of the link to "supplier unbind"
* and check if the link was in the "active" state. If so, force the consumer
* driver to unbind and start over (the consumer will not re-probe as we have
* changed the state of the link already).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_unbind_consumers(struct device *dev)
{
struct device_link *link;
start:
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
if (link->flags & DL_FLAG_STATELESS)
continue;
status = link->status;
if (status == DL_STATE_CONSUMER_PROBE) {
device_links_write_unlock();
wait_for_device_probe();
goto start;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
if (status == DL_STATE_ACTIVE) {
struct device *consumer = link->consumer;
get_device(consumer);
device_links_write_unlock();
device_release_driver_internal(consumer, NULL,
consumer->parent);
put_device(consumer);
goto start;
}
}
device_links_write_unlock();
}
/**
* device_links_purge - Delete existing links to other devices.
* @dev: Target device.
*/
static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
*/
device_links_write_lock();
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
WARN_ON(link->status == DL_STATE_ACTIVE);
__device_link_del(link);
}
list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
WARN_ON(link->status != DL_STATE_DORMANT &&
link->status != DL_STATE_NONE);
__device_link_del(link);
}
device_links_write_unlock();
}
/* Device links support end. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
error = device_create_file(dev, &dev_attr_deferred_probe);
if (error)
goto err_remove_online;
return 0;
err_remove_online:
device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);

View File

@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static ssize_t deferred_probe_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
bool value;
mutex_lock(&deferred_probe_mutex);
value = !list_empty(&dev->p->deferred_probe);
mutex_unlock(&deferred_probe_mutex);
return sprintf(buf, "%d\n", value);
}
DEVICE_ATTR_RO(deferred_probe);
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@ -338,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
ret = device_links_check_suppliers(dev);
if (ret)
return ret;
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@ -416,6 +434,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@ -508,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@ -518,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
pm_runtime_put_suppliers(dev);
return ret;
}
@ -772,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
static void __device_release_driver(struct device *dev)
static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@ -781,7 +802,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
while (device_links_busy(dev)) {
device_unlock(dev);
if (parent)
device_unlock(parent);
device_links_unbind_consumers(dev);
if (parent)
device_lock(parent);
device_lock(dev);
/*
* A concurrent invocation of the same function might
* have released the driver successfully while this one
* was waiting, so check for that.
*/
if (dev->driver != drv)
return;
}
pm_runtime_get_sync(dev);
pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@ -796,6 +837,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@ -812,12 +855,32 @@ static void __device_release_driver(struct device *dev)
}
}
void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent)
{
if (parent)
device_lock(parent);
device_lock(dev);
if (!drv || drv == dev->driver)
__device_release_driver(dev, parent);
device_unlock(dev);
if (parent)
device_unlock(parent);
}
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
*
* If this function is to be called with @dev->parent lock held, ensure that
* the device's consumers are unbound in advance or that their locks can be
* acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@ -826,9 +889,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
device_lock(dev);
__device_release_driver(dev);
device_unlock(dev);
device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@ -853,15 +914,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
if (dev->parent) /* Needed for USB */
device_lock(dev->parent);
device_lock(dev);
if (dev->driver == drv)
__device_release_driver(dev);
device_unlock(dev);
if (dev->parent)
device_unlock(dev->parent);
device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}

View File

@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* If the supplier goes away right after we've checked the link to it,
* we'll wait for its completion to change the state, but that's fine,
* because the only things that will block as a result are the SRCU
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
}
static void dpm_wait_for_superior(struct device *dev, bool async)
{
dpm_wait(dev->parent, async);
dpm_wait_for_suppliers(dev, async);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* The status of a device link can only be changed from "dormant" by a
* probe, but that cannot happen during system suspend/resume. In
* theory it can change to "dormant" at that time, but then it is
* reasonable to wait for the target device anyway (e.g. if it goes
* away, it's better to wait for it to go away completely and then
* continue instead of trying to continue in parallel with its
* unregistration).
*/
list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
dpm_wait_for_children(dev, async);
dpm_wait_for_consumers(dev, async);
}
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
dpm_wait(dev->parent, async);
dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
dpm_wait(dev->parent, async);
dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
dpm_wait(dev->parent, async);
dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@ -1040,6 +1098,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
dpm_wait_for_subordinate(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@ -1187,6 +1247,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
dpm_wait_for_subordinate(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@ -1342,6 +1404,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
}
device_links_read_unlock(idx);
}
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@ -1358,7 +1436,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_children(dev, async);
dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@ -1454,6 +1532,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);

View File

@ -127,6 +127,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
static inline bool device_pm_initialized(struct device *dev)
{
return dev->power.in_dpm_list;
}
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@ -146,6 +151,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
static inline bool device_pm_initialized(struct device *dev)
{
return device_is_registered(dev);
}
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)

View File

@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@ -258,6 +260,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
static int rpm_get_suppliers(struct device *dev)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
int retval;
if (!(link->flags & DL_FLAG_PM_RUNTIME))
continue;
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
link->rpm_active)
continue;
retval = pm_runtime_get_sync(link->supplier);
if (retval < 0) {
pm_runtime_put_noidle(link->supplier);
return retval;
}
link->rpm_active = true;
}
return 0;
}
static void rpm_put_suppliers(struct device *dev)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->rpm_active &&
READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
pm_runtime_put(link->supplier);
link->rpm_active = false;
}
}
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@ -266,19 +304,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
int retval, idx;
bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe)
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
else
} else {
spin_unlock_irq(&dev->power.lock);
/*
* Resume suppliers if necessary.
*
* The device's runtime PM status cannot change until this
* routine returns, so it is safe to read the status outside of
* the lock.
*/
if (use_links && dev->power.runtime_status == RPM_RESUMING) {
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
if (retval)
goto fail;
device_links_read_unlock(idx);
}
}
retval = cb(dev);
if (dev->power.irq_safe)
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
else
} else {
/*
* If the device is suspending and the callback has returned
* success, drop the usage counters of the suppliers that have
* been reference counted on its resume.
*
* Do that if resume fails too.
*/
if (use_links
&& ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
fail:
rpm_put_suppliers(dev);
device_links_read_unlock(idx);
}
spin_lock_irq(&dev->power.lock);
}
return retval;
}
@ -1446,6 +1522,94 @@ void pm_runtime_remove(struct device *dev)
pm_runtime_reinit(dev);
}
/**
* pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
* @dev: Device whose driver is going to be removed.
*
* Check links from this device to any consumers and if any of them have active
* runtime PM references to the device, drop the usage counter of the device
* (once per link).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*
* Since the device is guaranteed to be runtime-active at the point this is
* called, nothing else needs to be done here.
*
* Moreover, this is called after device_links_busy() has returned 'false', so
* the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
* therefore rpm_active can't be manipulated concurrently.
*/
void pm_runtime_clean_up_links(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->rpm_active) {
pm_runtime_put_noidle(dev);
link->rpm_active = false;
}
}
device_links_read_unlock(idx);
}
/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_get_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_get_sync(link->supplier);
device_links_read_unlock(idx);
}
/**
* pm_runtime_put_suppliers - Drop references to supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_put(link->supplier);
device_links_read_unlock(idx);
}
void pm_runtime_new_link(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.links_count++;
spin_unlock_irq(&dev->power.lock);
}
void pm_runtime_drop_link(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.

View File

@ -0,0 +1,9 @@
config TEST_ASYNC_DRIVER_PROBE
tristate "Build kernel module to test asynchronous driver probing"
depends on m
help
Enabling this option produces a kernel module that allows
testing asynchronous driver probing by the device core.
The module name will be test_async_driver_probe.ko
If unsure say N.

View File

@ -0,0 +1 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o

View File

@ -0,0 +1,171 @@
/*
* Copyright (C) 2014 Google, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/time.h>
#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
static int test_probe(struct platform_device *pdev)
{
dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
TEST_PROBE_DELAY);
msleep(TEST_PROBE_DELAY);
dev_info(&pdev->dev, "done sleeping\n");
return 0;
}
static struct platform_driver async_driver = {
.driver = {
.name = "test_async_driver",
.owner = THIS_MODULE,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = test_probe,
};
static struct platform_driver sync_driver = {
.driver = {
.name = "test_sync_driver",
.owner = THIS_MODULE,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = test_probe,
};
static struct platform_device *async_dev_1, *async_dev_2;
static struct platform_device *sync_dev_1;
static int __init test_async_probe_init(void)
{
ktime_t calltime, delta;
unsigned long long duration;
int error;
pr_info("registering first asynchronous device...\n");
async_dev_1 = platform_device_register_simple("test_async_driver", 1,
NULL, 0);
if (IS_ERR(async_dev_1)) {
error = PTR_ERR(async_dev_1);
pr_err("failed to create async_dev_1: %d", error);
return error;
}
pr_info("registering asynchronous driver...\n");
calltime = ktime_get();
error = platform_driver_register(&async_driver);
if (error) {
pr_err("Failed to register async_driver: %d\n", error);
goto err_unregister_async_dev_1;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
error = -ETIMEDOUT;
goto err_unregister_async_driver;
}
pr_info("registering second asynchronous device...\n");
calltime = ktime_get();
async_dev_2 = platform_device_register_simple("test_async_driver", 2,
NULL, 0);
if (IS_ERR(async_dev_2)) {
error = PTR_ERR(async_dev_2);
pr_err("failed to create async_dev_2: %d", error);
goto err_unregister_async_driver;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
error = -ETIMEDOUT;
goto err_unregister_async_dev_2;
}
pr_info("registering synchronous driver...\n");
error = platform_driver_register(&sync_driver);
if (error) {
pr_err("Failed to register async_driver: %d\n", error);
goto err_unregister_async_dev_2;
}
pr_info("registering synchronous device...\n");
calltime = ktime_get();
sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
NULL, 0);
if (IS_ERR(sync_dev_1)) {
error = PTR_ERR(sync_dev_1);
pr_err("failed to create sync_dev_1: %d", error);
goto err_unregister_sync_driver;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe was too quick\n");
error = -ETIMEDOUT;
goto err_unregister_sync_dev_1;
}
pr_info("completed successfully");
return 0;
err_unregister_sync_dev_1:
platform_device_unregister(sync_dev_1);
err_unregister_sync_driver:
platform_driver_unregister(&sync_driver);
err_unregister_async_dev_2:
platform_device_unregister(async_dev_2);
err_unregister_async_driver:
platform_driver_unregister(&async_driver);
err_unregister_async_dev_1:
platform_device_unregister(async_dev_1);
return error;
}
module_init(test_async_probe_init);
static void __exit test_async_probe_exit(void)
{
platform_driver_unregister(&async_driver);
platform_driver_unregister(&sync_driver);
platform_device_unregister(async_dev_1);
platform_device_unregister(async_dev_2);
platform_device_unregister(sync_dev_1);
}
module_exit(test_async_probe_exit);
MODULE_DESCRIPTION("Test module for asynchronous driver probing");
MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
MODULE_LICENSE("GPL");

View File

@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
if (!entry->group)
entry->group = generic_device_group(dev);
else
iommu_group_ref_get(entry->group);
return entry->group;
}

View File

@ -28,6 +28,7 @@
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@ -2090,6 +2091,7 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_on_init_error(void)
{
kmemleak_free(irq_lookup_table);
free_pages((unsigned long)irq_lookup_table,
get_order(rlookup_table_size));
@ -2321,6 +2323,8 @@ static int __init early_amd_iommu_init(void)
irq_lookup_table = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
get_order(rlookup_table_size));
kmemleak_alloc(irq_lookup_table, rlookup_table_size,
1, GFP_KERNEL);
if (!irq_lookup_table)
goto out;
}

View File

@ -805,8 +805,10 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
goto out_free_domain;
group = iommu_group_get(&pdev->dev);
if (!group)
if (!group) {
ret = -EINVAL;
goto out_free_domain;
}
ret = iommu_attach_group(dev_state->domain, group);
if (ret != 0)

View File

@ -20,6 +20,8 @@
* This driver is powered by bad coffee and bombay mix.
*/
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@ -1358,7 +1360,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
} while (size -= granule);
}
static struct iommu_gather_ops arm_smmu_gather_ops = {
static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
@ -1723,13 +1725,14 @@ static struct platform_driver arm_smmu_driver;
static int arm_smmu_match_node(struct device *dev, void *data)
{
return dev->of_node == data;
return dev->fwnode == data;
}
static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
np, arm_smmu_match_node);
fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@ -1765,7 +1768,7 @@ static int arm_smmu_add_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
} else {
smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
if (!smmu)
return -ENODEV;
master = kzalloc(sizeof(*master), GFP_KERNEL);
@ -2380,10 +2383,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return 0;
}
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
u32 reg;
bool coherent;
bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
/* IDR0 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@ -2435,13 +2438,9 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_HYP;
/*
* The dma-coherent property is used in preference to the ID
* The coherency feature as set by FW is used in preference to the ID
* register, but warn on mismatch.
*/
coherent = of_dma_is_coherent(smmu->dev->of_node);
if (coherent)
smmu->features |= ARM_SMMU_FEAT_COHERENCY;
if (!!(reg & IDR0_COHACC) != coherent)
dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
coherent ? "true" : "false");
@ -2562,21 +2561,61 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
return 0;
}
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
{
struct acpi_iort_smmu_v3 *iort_smmu;
struct device *dev = smmu->dev;
struct acpi_iort_node *node;
node = *(struct acpi_iort_node **)dev_get_platdata(dev);
/* Retrieve SMMUv3 specific data */
iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
smmu->features |= ARM_SMMU_FEAT_COHERENCY;
return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
{
return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
{
int irq, ret;
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
bool bypass = true;
u32 cells;
int ret = -EINVAL;
if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
dev_err(dev, "missing #iommu-cells property\n");
else if (cells != 1)
dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
else
bypass = false;
ret = 0;
parse_driver_options(smmu);
if (of_dma_is_coherent(dev->of_node))
smmu->features |= ARM_SMMU_FEAT_COHERENCY;
return ret;
}
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@ -2613,10 +2652,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
parse_driver_options(smmu);
if (dev->of_node) {
ret = arm_smmu_device_dt_probe(pdev, smmu);
} else {
ret = arm_smmu_device_acpi_probe(pdev, smmu);
if (ret == -ENODEV)
return ret;
}
/* Set bypass mode according to firmware probing result */
bypass = !!ret;
/* Probe the h/w */
ret = arm_smmu_device_probe(smmu);
ret = arm_smmu_device_hw_probe(smmu);
if (ret)
return ret;
@ -2634,7 +2682,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
iommu_register_instance(dev->fwnode, &arm_smmu_ops);
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
pci_request_acs();
@ -2677,7 +2726,7 @@ static struct platform_driver arm_smmu_driver = {
.name = "arm-smmu-v3",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
.probe = arm_smmu_device_dt_probe,
.probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
@ -2715,6 +2764,17 @@ static int __init arm_smmu_of_init(struct device_node *np)
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
#ifdef CONFIG_ACPI
static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
{
if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
return arm_smmu_init();
return 0;
}
IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
#endif
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");

View File

@ -28,6 +28,8 @@
#define pr_fmt(fmt) "arm-smmu: " fmt
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
@ -247,6 +249,7 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define CB_PAR_F (1 << 0)
@ -642,7 +645,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
}
}
static struct iommu_gather_ops arm_smmu_gather_ops = {
static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
@ -1379,13 +1382,14 @@ static bool arm_smmu_capable(enum iommu_cap cap)
static int arm_smmu_match_node(struct device *dev, void *data)
{
return dev->of_node == data;
return dev->fwnode == data;
}
static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
np, arm_smmu_match_node);
fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@ -1403,7 +1407,7 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_free;
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
return -ENODEV;
}
@ -1478,7 +1482,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
}
if (group)
return group;
return iommu_group_ref_get(group);
if (dev_is_pci(dev))
group = pci_device_group(dev);
@ -1581,16 +1585,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
/*
* Before clearing ARM_MMU500_ACTLR_CPRE, need to
* clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
* bit is only present in MMU-500r2 onwards.
*/
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
if ((smmu->model == ARM_MMU500) && (major >= 2)) {
if (smmu->model == ARM_MMU500) {
/*
* Before clearing ARM_MMU500_ACTLR_CPRE, need to
* clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
* bit is only present in MMU-500r2 onwards.
*/
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
if (major >= 2)
reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
/*
* Allow unmatched Stream IDs to allocate bypass
* TLB entries for reduced latency.
*/
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
@ -1667,7 +1677,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned long size;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
bool cttw_dt, cttw_reg;
bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
int i;
dev_notice(smmu->dev, "probing hardware configuration...\n");
@ -1712,20 +1722,17 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/*
* In order for DMA API calls to work properly, we must defer to what
* the DT says about coherency, regardless of what the hardware claims.
* the FW says about coherency, regardless of what the hardware claims.
* Fortunately, this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly.
*/
cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
cttw_reg = !!(id & ID0_CTTW);
if (cttw_dt)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
if (cttw_dt || cttw_reg)
if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
cttw_dt ? "" : "non-");
if (cttw_dt != cttw_reg)
cttw_fw ? "" : "non-");
if (cttw_fw != cttw_reg)
dev_notice(smmu->dev,
"\t(IDR0.CTTW overridden by dma-coherent property)\n");
"\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
@ -1906,15 +1913,83 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
int ret = 0;
switch (model) {
case ACPI_IORT_SMMU_V1:
case ACPI_IORT_SMMU_CORELINK_MMU400:
smmu->version = ARM_SMMU_V1;
smmu->model = GENERIC_SMMU;
break;
case ACPI_IORT_SMMU_V2:
smmu->version = ARM_SMMU_V2;
smmu->model = GENERIC_SMMU;
break;
case ACPI_IORT_SMMU_CORELINK_MMU500:
smmu->version = ARM_SMMU_V2;
smmu->model = ARM_MMU500;
break;
default:
ret = -ENODEV;
}
return ret;
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
{
struct device *dev = smmu->dev;
struct acpi_iort_node *node =
*(struct acpi_iort_node **)dev_get_platdata(dev);
struct acpi_iort_smmu *iort_smmu;
int ret;
/* Retrieve SMMU1/2 specific data */
iort_smmu = (struct acpi_iort_smmu *)node->node_data;
ret = acpi_smmu_get_data(iort_smmu->model, smmu);
if (ret < 0)
return ret;
/* Ignore the configuration access interrupt */
smmu->num_global_irqs = 1;
if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
{
return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
{
const struct arm_smmu_match_data *data;
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
bool legacy_binding;
if (of_property_read_u32(dev->of_node, "#global-interrupts",
&smmu->num_global_irqs)) {
dev_err(dev, "missing #global-interrupts property\n");
return -ENODEV;
}
data = of_device_get_match_data(dev);
smmu->version = data->version;
smmu->model = data->model;
parse_driver_options(smmu);
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) {
if (!using_legacy_binding)
@ -1927,6 +2002,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return -ENODEV;
}
if (of_dma_is_coherent(dev->of_node))
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
return 0;
}
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate arm_smmu_device\n");
@ -1934,9 +2022,13 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
smmu->dev = dev;
data = of_device_get_match_data(dev);
smmu->version = data->version;
smmu->model = data->model;
if (dev->of_node)
err = arm_smmu_device_dt_probe(pdev, smmu);
else
err = arm_smmu_device_acpi_probe(pdev, smmu);
if (err)
return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
smmu->base = devm_ioremap_resource(dev, res);
@ -1944,12 +2036,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return PTR_ERR(smmu->base);
smmu->size = resource_size(res);
if (of_property_read_u32(dev->of_node, "#global-interrupts",
&smmu->num_global_irqs)) {
dev_err(dev, "missing #global-interrupts property\n");
return -ENODEV;
}
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
@ -1984,8 +2070,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
return err;
parse_driver_options(smmu);
if (smmu->version == ARM_SMMU_V2 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
@ -2007,7 +2091,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
}
of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
iommu_register_instance(dev->fwnode, &arm_smmu_ops);
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
@ -2047,7 +2131,7 @@ static struct platform_driver arm_smmu_driver = {
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
.probe = arm_smmu_device_dt_probe,
.probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
@ -2090,6 +2174,17 @@ IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
if (iort_node_match(ACPI_IORT_NODE_SMMU))
return arm_smmu_init();
return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");

View File

@ -432,13 +432,12 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
return ret;
}
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot)
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot)
{
dma_addr_t dma_addr;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iova_domain *iovad = cookie_iovad(domain);
phys_addr_t phys = page_to_phys(page) + offset;
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
@ -454,6 +453,12 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return dma_addr + iova_off;
}
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot)
{
return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
@ -624,6 +629,19 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
dma_direction_to_prot(dir, false) | IOMMU_MMIO);
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
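These two helpers back the .map_resource/.unmap_resource DMA ops once an architecture hooks them up, so a driver-facing sketch would go through the generic dma_map_resource() API; fifo_phys and fifo_len below are illustrative:
dma_addr_t fifo_dma;

fifo_dma = dma_map_resource(dev, fifo_phys, fifo_len,
			    DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, fifo_dma))
	return -ENOMEM;

/* ... hand fifo_dma to a DMA engine for device-to-device transfers ... */

dma_unmap_resource(dev, fifo_dma, fifo_len, DMA_BIDIRECTIONAL, 0);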
int iommu_dma_supported(struct device *dev, u64 mask)
{
/*

View File

@ -70,6 +70,36 @@ static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
((0 << 15) | (0 << 10)), /* no access */
((1 << 15) | (1 << 10)), /* IOMMU_READ only */
((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
(0 << 4), /* no access */
(1 << 4), /* IOMMU_READ only */
(2 << 4), /* IOMMU_WRITE only */
(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
((0 << 9) | (0 << 4)), /* no access */
((1 << 9) | (1 << 4)), /* IOMMU_READ only */
((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
(0 << 2), /* no access */
(1 << 2), /* IOMMU_READ only */
(2 << 2), /* IOMMU_WRITE only */
(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};
#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
@ -97,16 +127,17 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
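As a worked example of the new prot argument, using only the v5 constants above (pte and paddr are illustrative names): with prot = IOMMU_READ | IOMMU_WRITE (= 3), PG_ENT_SHIFT = 4 and LV2_PROT = SYSMMU_V5_LV2_PROT, a small-page entry is built as:
pte = mk_lv2ent_spage(paddr, IOMMU_READ | IOMMU_WRITE);
/* expands to (paddr >> 4) | (3 << 2) | 2: address bits, R/W protection, small-page flag */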
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
@ -206,6 +237,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
struct exynos_iommu_owner {
struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
struct iommu_domain *domain; /* domain this device is attached */
struct mutex rpm_lock; /* for runtime pm of all sysmmus */
};
/*
@ -237,8 +269,8 @@ struct sysmmu_drvdata {
struct clk *aclk; /* SYSMMU's aclk clock */
struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
int activations; /* number of calls to sysmmu_enable */
spinlock_t lock; /* lock for modyfying state */
bool active; /* current status */
struct exynos_iommu_domain *domain; /* domain we belong to */
struct list_head domain_node; /* node for domain clients list */
struct list_head owner_node; /* node for owner controllers list */
@ -251,25 +283,6 @@ static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
return container_of(dom, struct exynos_iommu_domain, domain);
}
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
/* return true if the System MMU was not active previously
and it needs to be initialized */
return ++data->activations == 1;
}
static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
/* return true if the System MMU is needed to be disabled */
BUG_ON(data->activations < 1);
return --data->activations == 0;
}
static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
return data->activations > 0;
}
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
@ -388,7 +401,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!is_sysmmu_active(data));
WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
reg_status = REG_INT_STATUS;
@ -434,40 +447,19 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
clk_enable(data->clk_master);
writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
writel(0, data->sfrbase + REG_MMU_CFG);
__sysmmu_disable_clocks(data);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
bool disabled;
unsigned long flags;
clk_enable(data->clk_master);
spin_lock_irqsave(&data->lock, flags);
disabled = set_sysmmu_inactive(data);
if (disabled) {
data->pgtable = 0;
data->domain = NULL;
__sysmmu_disable_nocount(data);
dev_dbg(data->sysmmu, "Disabled\n");
} else {
dev_dbg(data->sysmmu, "%d times left to disable\n",
data->activations);
}
writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
writel(0, data->sfrbase + REG_MMU_CFG);
data->active = false;
spin_unlock_irqrestore(&data->lock, flags);
return disabled;
__sysmmu_disable_clocks(data);
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
@ -481,20 +473,24 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
else
cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
cfg |= CFG_EAP; /* enable access protection bits check */
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
unsigned long flags;
__sysmmu_enable_clocks(data);
spin_lock_irqsave(&data->lock, flags);
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
__sysmmu_set_ptbase(data, data->pgtable);
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
data->active = true;
spin_unlock_irqrestore(&data->lock, flags);
/*
* SYSMMU driver keeps master's clock enabled only for the short
@ -505,48 +501,18 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
clk_disable(data->clk_master);
}
static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
struct exynos_iommu_domain *domain)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
if (set_sysmmu_active(data)) {
data->pgtable = pgtable;
data->domain = domain;
__sysmmu_enable_nocount(data);
dev_dbg(data->sysmmu, "Enabled\n");
} else {
ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
dev_dbg(data->sysmmu, "already enabled\n");
}
if (WARN_ON(ret < 0))
set_sysmmu_inactive(data); /* decrement count */
spin_unlock_irqrestore(&data->lock, flags);
return ret;
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
sysmmu_iova_t iova)
{
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
clk_enable(data->clk_master);
__sysmmu_tlb_invalidate_entry(data, iova, 1);
clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@ -555,7 +521,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
if (data->active) {
unsigned int num_inv = 1;
clk_enable(data->clk_master);
@ -578,9 +544,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
sysmmu_unblock(data);
}
clk_disable(data->clk_master);
} else {
dev_dbg(data->master,
"disabled. Skipping TLB invalidation @ %#x\n", iova);
}
spin_unlock_irqrestore(&data->lock, flags);
}
@ -652,10 +615,15 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
__sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
if (MMU_MAJ_VER(data->version) < 5)
if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
else
LV1_PROT = SYSMMU_LV1_PROT;
LV2_PROT = SYSMMU_LV2_PROT;
} else {
PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
LV1_PROT = SYSMMU_V5_LV1_PROT;
LV2_PROT = SYSMMU_V5_LV2_PROT;
}
}
pm_runtime_enable(dev);
@ -665,34 +633,46 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev);
struct device *master = data->master;
dev_dbg(dev, "suspend\n");
if (is_sysmmu_active(data)) {
__sysmmu_disable_nocount(data);
pm_runtime_put(dev);
if (master) {
struct exynos_iommu_owner *owner = master->archdata.iommu;
mutex_lock(&owner->rpm_lock);
if (data->domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
mutex_unlock(&owner->rpm_lock);
}
return 0;
}
static int exynos_sysmmu_resume(struct device *dev)
static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev);
struct device *master = data->master;
dev_dbg(dev, "resume\n");
if (is_sysmmu_active(data)) {
pm_runtime_get_sync(dev);
__sysmmu_enable_nocount(data);
if (master) {
struct exynos_iommu_owner *owner = master->archdata.iommu;
mutex_lock(&owner->rpm_lock);
if (data->domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}
mutex_unlock(&owner->rpm_lock);
}
return 0;
}
#endif
static const struct dev_pm_ops sysmmu_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static const struct of_device_id sysmmu_of_match[] __initconst = {
@ -796,9 +776,12 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
if (__sysmmu_disable(data))
data->master = NULL;
spin_lock(&data->lock);
__sysmmu_disable(data);
data->pgtable = 0;
data->domain = NULL;
list_del_init(&data->domain_node);
spin_unlock(&data->lock);
}
spin_unlock_irqrestore(&domain->lock, flags);
@ -832,31 +815,34 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct sysmmu_drvdata *data, *next;
unsigned long flags;
bool found = false;
if (!has_sysmmu(dev) || owner->domain != iommu_domain)
return;
mutex_lock(&owner->rpm_lock);
list_for_each_entry(data, &owner->controllers, owner_node) {
pm_runtime_get_noresume(data->sysmmu);
if (pm_runtime_active(data->sysmmu))
__sysmmu_disable(data);
pm_runtime_put(data->sysmmu);
}
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
if (data->master == dev) {
if (__sysmmu_disable(data)) {
data->master = NULL;
list_del_init(&data->domain_node);
}
pm_runtime_put(data->sysmmu);
found = true;
}
spin_lock(&data->lock);
data->pgtable = 0;
data->domain = NULL;
list_del_init(&data->domain_node);
spin_unlock(&data->lock);
}
owner->domain = NULL;
spin_unlock_irqrestore(&domain->lock, flags);
owner->domain = NULL;
mutex_unlock(&owner->rpm_lock);
if (found)
dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
__func__, &pagetable);
else
dev_err(dev, "%s: No IOMMU is attached\n", __func__);
dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
&pagetable);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
@ -867,7 +853,6 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
int ret = -ENODEV;
if (!has_sysmmu(dev))
return -ENODEV;
@ -875,29 +860,32 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
if (owner->domain)
exynos_iommu_detach_device(owner->domain, dev);
mutex_lock(&owner->rpm_lock);
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(data, &owner->controllers, owner_node) {
pm_runtime_get_sync(data->sysmmu);
ret = __sysmmu_enable(data, pagetable, domain);
if (ret >= 0) {
data->master = dev;
spin_lock_irqsave(&domain->lock, flags);
list_add_tail(&data->domain_node, &domain->clients);
spin_unlock_irqrestore(&domain->lock, flags);
}
spin_lock(&data->lock);
data->pgtable = pagetable;
data->domain = domain;
list_add_tail(&data->domain_node, &domain->clients);
spin_unlock(&data->lock);
}
if (ret < 0) {
dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
__func__, &pagetable);
return ret;
}
owner->domain = iommu_domain;
dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
__func__, &pagetable, (ret == 0) ? "" : ", again");
spin_unlock_irqrestore(&domain->lock, flags);
return ret;
list_for_each_entry(data, &owner->controllers, owner_node) {
pm_runtime_get_noresume(data->sysmmu);
if (pm_runtime_active(data->sysmmu))
__sysmmu_enable(data);
pm_runtime_put(data->sysmmu);
}
mutex_unlock(&owner->rpm_lock);
dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
&pagetable);
return 0;
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
@ -954,7 +942,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
static int lv1set_section(struct exynos_iommu_domain *domain,
sysmmu_pte_t *sent, sysmmu_iova_t iova,
phys_addr_t paddr, short *pgcnt)
phys_addr_t paddr, int prot, short *pgcnt)
{
if (lv1ent_section(sent)) {
WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
@ -973,7 +961,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
update_pte(sent, mk_lv1ent_sect(paddr));
update_pte(sent, mk_lv1ent_sect(paddr, prot));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@ -991,13 +979,13 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
short *pgcnt)
int prot, short *pgcnt)
{
if (size == SPAGE_SIZE) {
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
update_pte(pent, mk_lv2ent_spage(paddr));
update_pte(pent, mk_lv2ent_spage(paddr, prot));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
@ -1013,7 +1001,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
return -EADDRINUSE;
}
*pent = mk_lv2ent_lpage(paddr);
*pent = mk_lv2ent_lpage(paddr, prot);
}
dma_sync_single_for_device(dma_dev, pent_base,
sizeof(*pent) * SPAGES_PER_LPAGE,
@ -1061,13 +1049,14 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
int ret = -ENOMEM;
BUG_ON(domain->pgtable == NULL);
prot &= SYSMMU_SUPPORTED_PROT_BITS;
spin_lock_irqsave(&domain->pgtablelock, flags);
entry = section_entry(domain->pgtable, iova);
if (size == SECT_SIZE) {
ret = lv1set_section(domain, entry, iova, paddr,
ret = lv1set_section(domain, entry, iova, paddr, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
} else {
sysmmu_pte_t *pent;
@ -1078,7 +1067,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
if (IS_ERR(pent))
ret = PTR_ERR(pent);
else
ret = lv2set_page(pent, paddr, size,
ret = lv2set_page(pent, paddr, size, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
}
@ -1268,10 +1257,20 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
dev->archdata.iommu = owner;
}
list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev;
/*
* SYSMMU will be runtime activated via device link (dependency) to its
* master device, so there are no direct calls to pm_runtime_get/put
* in this driver.
*/
device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
return 0;
}

View File

@ -793,8 +793,7 @@ static int __init arm_v7s_do_selftests(void)
* Distinct mappings of different granule sizes.
*/
iova = 0;
i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
while (i != BITS_PER_LONG) {
for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
@ -811,8 +810,6 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
iova += SZ_16M;
i++;
i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
loopnr++;
}
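For reference, for_each_set_bit() visits exactly the bits that are set, so the explicit find_first_bit()/find_next_bit() bookkeeping that was here goes away. A standalone sketch with an illustrative bitmap:
unsigned long pgsizes = SZ_4K | SZ_64K | SZ_1M | SZ_16M;
unsigned int i;

for_each_set_bit(i, &pgsizes, BITS_PER_LONG)
	pr_info("supported granule: %lu bytes\n", 1UL << i);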

View File

@ -916,7 +916,7 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
static struct iommu_gather_ops dummy_tlb_ops __initdata = {
static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync,
@ -980,8 +980,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
* Distinct mappings of different granule sizes.
*/
iova = 0;
j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
while (j != BITS_PER_LONG) {
for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
@ -999,8 +998,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
iova += SZ_1G;
j++;
j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
}
/* Partial unmap */

View File

@ -551,6 +551,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_get);
/**
* iommu_group_ref_get - Increment reference on a group
* @group: the group to use, must not be NULL
*
* This function is called by iommu drivers to take additional references on an
* existing group. Returns the given group for convenience.
*/
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
kobject_get(group->devices_kobj);
return group;
}
/**
* iommu_group_put - Decrement group reference
* @group: the group to use
@ -1615,6 +1628,46 @@ out:
return ret;
}
struct iommu_instance {
struct list_head list;
struct fwnode_handle *fwnode;
const struct iommu_ops *ops;
};
static LIST_HEAD(iommu_instance_list);
static DEFINE_SPINLOCK(iommu_instance_lock);
void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (WARN_ON(!iommu))
return;
of_node_get(to_of_node(fwnode));
INIT_LIST_HEAD(&iommu->list);
iommu->fwnode = fwnode;
iommu->ops = ops;
spin_lock(&iommu_instance_lock);
list_add_tail(&iommu->list, &iommu_instance_list);
spin_unlock(&iommu_instance_lock);
}
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
{
struct iommu_instance *instance;
const struct iommu_ops *ops = NULL;
spin_lock(&iommu_instance_lock);
list_for_each_entry(instance, &iommu_instance_list, list)
if (instance->fwnode == fwnode) {
ops = instance->ops;
break;
}
spin_unlock(&iommu_instance_lock);
return ops;
}
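This registry is what lets fwnode-based code translate a firmware handle back into a set of iommu_ops. A rough sketch of both ends, modelled on the SMMU changes elsewhere in this diff (my_iommu_ops and iommu_fwnode are illustrative):
/* IOMMU driver side, once the instance is up: */
iommu_register_instance(dev->fwnode, &my_iommu_ops);

/* Firmware/bus code side, when configuring a master device: */
const struct iommu_ops *ops = iommu_get_instance(iommu_fwnode);

if (!ops)
	return -EPROBE_DEFER;	/* the IOMMU instance may not have probed yet */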
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops)
{

View File

@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(init_iova_domain);
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
if ((*limit_pfn != iovad->dma_32bit_pfn) ||
if ((*limit_pfn > iovad->dma_32bit_pfn) ||
(iovad->cached32_node == NULL))
return rb_last(&iovad->rbroot);
else {

View File

@ -195,14 +195,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
int i;
head = dev->archdata.iommu;
list_for_each_entry_safe(cur, next, &head->client, client) {
larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
for (i = 0; i < fwspec->num_ids; ++i) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
@ -282,14 +282,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
struct mtk_iommu_data *data;
struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
if (!priv)
if (!data)
return -ENODEV;
data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
@ -310,13 +308,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
struct mtk_iommu_data *data;
struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
if (!priv)
if (!data)
return;
data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
@ -366,8 +362,8 @@ static int mtk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
if (!dev->archdata.iommu) /* Not a iommu client device */
return -ENODEV;
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@ -379,44 +375,33 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
struct mtk_iommu_client_priv *head, *cur, *next;
head = dev->archdata.iommu;
if (!head)
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
list_for_each_entry_safe(cur, next, &head->client, client) {
list_del(&cur->client);
kfree(cur);
}
kfree(head);
dev->archdata.iommu = NULL;
iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
struct mtk_iommu_data *data;
struct mtk_iommu_client_priv *priv;
struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
priv = dev->archdata.iommu;
if (!priv)
if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
} else {
iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
struct mtk_iommu_client_priv *head, *priv, *next;
struct platform_device *m4updev;
if (args->args_count != 1) {
@ -425,38 +410,16 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
if (!dev->archdata.iommu) {
if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return -ENOMEM;
dev->archdata.iommu = head;
INIT_LIST_HEAD(&head->client);
head->m4udev = &m4updev->dev;
} else {
head = dev->archdata.iommu;
dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto err_free_mem;
priv->mtk_m4u_id = args->args[0];
list_add_tail(&priv->client, &head->client);
return 0;
err_free_mem:
list_for_each_entry_safe(priv, next, &head->client, client)
kfree(priv);
kfree(head);
dev->archdata.iommu = NULL;
return -ENOMEM;
return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops mtk_iommu_ops = {
@ -583,17 +546,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
continue;
plarbdev = of_find_device_by_node(larbnode);
of_node_put(larbnode);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larbnode, NULL,
platform_bus_type.dev_root);
if (!plarbdev)
if (!plarbdev) {
of_node_put(larbnode);
return -EPROBE_DEFER;
}
}
data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
component_match_add(dev, &match, compare_of, larbnode);
component_match_add_release(dev, &match, release_of,
compare_of, larbnode);
}
platform_set_drvdata(pdev, data);

View File

@ -34,12 +34,6 @@ struct mtk_iommu_suspend_reg {
u32 int_main_control;
};
struct mtk_iommu_client_priv {
struct list_head client;
unsigned int mtk_m4u_id;
struct device *m4udev;
};
struct mtk_iommu_domain;
struct mtk_iommu_data {
@ -60,6 +54,11 @@ static inline int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
static inline void release_of(struct device *dev, void *data)
{
of_node_put(data);
}
static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);

View File

@ -204,14 +204,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
int i;
head = dev->archdata.iommu;
list_for_each_entry_safe(cur, next, &head->client, client) {
larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
for (i = 0; i < fwspec->num_ids; ++i) {
larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
portid = mt2701_m4u_to_port(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
@ -271,14 +271,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
struct mtk_iommu_data *data;
struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
if (!priv)
if (!data)
return -ENODEV;
data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
@ -295,13 +293,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
struct mtk_iommu_data *data;
struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
if (!priv)
if (!data)
return;
data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
@ -366,6 +362,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
static struct iommu_ops mtk_iommu_ops;
/*
* MTK generation one IOMMU HW only supports one IOMMU domain, and all the
* clients share the same IOVA address space.
@ -373,7 +371,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static int mtk_iommu_create_mapping(struct device *dev,
struct of_phandle_args *args)
{
struct mtk_iommu_client_priv *head, *priv, *next;
struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
struct device *m4udev;
@ -385,41 +383,37 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
}
if (!dev->archdata.iommu) {
if (!dev->iommu_fwspec) {
ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
if (ret)
return ret;
} else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
return -EINVAL;
}
if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return -ENOMEM;
dev->archdata.iommu = head;
INIT_LIST_HEAD(&head->client);
head->m4udev = &m4updev->dev;
} else {
head = dev->archdata.iommu;
dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto err_free_mem;
}
priv->mtk_m4u_id = args->args[0];
list_add_tail(&priv->client, &head->client);
ret = iommu_fwspec_add_ids(dev, args->args, 1);
if (ret)
return ret;
m4udev = head->m4udev;
data = dev->iommu_fwspec->iommu_priv;
m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
if (!mtk_mapping) {
/* MTK iommu supports a 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
0, 1ULL << 32);
if (IS_ERR(mtk_mapping)) {
ret = PTR_ERR(mtk_mapping);
goto err_free_mem;
}
if (IS_ERR(mtk_mapping))
return PTR_ERR(mtk_mapping);
m4udev->archdata.iommu = mtk_mapping;
}
@ -432,11 +426,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
err_release_mapping:
arm_iommu_release_mapping(mtk_mapping);
m4udev->archdata.iommu = NULL;
err_free_mem:
list_for_each_entry_safe(priv, next, &head->client, client)
kfree(priv);
kfree(head);
dev->archdata.iommu = NULL;
return ret;
}
@ -458,8 +447,8 @@ static int mtk_iommu_add_device(struct device *dev)
of_node_put(iommu_spec.np);
}
if (!dev->archdata.iommu) /* Not a iommu client device */
return -ENODEV;
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@ -471,37 +460,27 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
struct mtk_iommu_client_priv *head, *cur, *next;
head = dev->archdata.iommu;
if (!head)
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
list_for_each_entry_safe(cur, next, &head->client, client) {
list_del(&cur->client);
kfree(cur);
}
kfree(head);
dev->archdata.iommu = NULL;
iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
struct mtk_iommu_data *data;
struct mtk_iommu_client_priv *priv;
struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
priv = dev->archdata.iommu;
if (!priv)
if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
} else {
iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
@ -624,17 +603,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
continue;
plarbdev = of_find_device_by_node(larb_spec.np);
of_node_put(larb_spec.np);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larb_spec.np, NULL,
platform_bus_type.dev_root);
if (!plarbdev)
if (!plarbdev) {
of_node_put(larb_spec.np);
return -EPROBE_DEFER;
}
}
data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
component_match_add(dev, &match, compare_of, larb_spec.np);
component_match_add_release(dev, &match, release_of,
compare_of, larb_spec.np);
larb_nr++;
}

View File

@ -96,45 +96,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
struct of_iommu_node {
struct list_head list;
struct device_node *np;
const struct iommu_ops *ops;
};
static LIST_HEAD(of_iommu_list);
static DEFINE_SPINLOCK(of_iommu_lock);
void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
{
struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (WARN_ON(!iommu))
return;
of_node_get(np);
INIT_LIST_HEAD(&iommu->list);
iommu->np = np;
iommu->ops = ops;
spin_lock(&of_iommu_lock);
list_add_tail(&iommu->list, &of_iommu_list);
spin_unlock(&of_iommu_lock);
}
const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
struct of_iommu_node *node;
const struct iommu_ops *ops = NULL;
spin_lock(&of_iommu_lock);
list_for_each_entry(node, &of_iommu_list, list)
if (node->np == np) {
ops = node->ops;
break;
}
spin_unlock(&of_iommu_lock);
return ops;
}
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
struct of_phandle_args *iommu_spec = data;

View File

@ -8,7 +8,6 @@
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>

View File

@ -1764,8 +1764,7 @@ static void pci_dma_configure(struct pci_dev *dev)
if (attr == DEV_DMA_NOT_SUPPORTED)
dev_warn(&dev->dev, "DMA not supported.\n");
else
arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
attr == DEV_DMA_COHERENT);
acpi_dma_configure(&dev->dev, attr);
}
pci_put_host_bridge_device(bridge);

View File

@ -573,6 +573,8 @@ struct acpi_pci_root {
bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);

View File

@ -566,6 +566,7 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(clksrc) \
ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE()
#define INIT_TEXT \

View File

@ -56,6 +56,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
struct fwnode_handle *fwnode;
fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL);
if (!fwnode)
return NULL;
fwnode->type = FWNODE_ACPI_STATIC;
return fwnode;
}
static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
{
if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
return;
kfree(fwnode);
}
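A static fwnode is simply a tagged handle that firmware code can attach to a device it creates without a DT node or ACPI companion. A sketch only; the device and teardown path below are illustrative, not taken from this series:
struct fwnode_handle *fwnode;

fwnode = acpi_alloc_fwnode_static();
if (!fwnode)
	return -ENOMEM;

pdev->dev.fwnode = fwnode;	/* hand the handle to a freshly created device */

/* ... later, on error or removal: */
pdev->dev.fwnode = NULL;
acpi_free_fwnode_static(fwnode);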
/**
* ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
* the PCI-defined class-code information
@ -744,6 +765,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
static inline void acpi_dma_configure(struct device *dev,
enum dev_dma_attr attr) { }
static inline void acpi_dma_deconfigure(struct device *dev) { }
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)

View File

@ -23,20 +23,36 @@
#include <linux/fwnode.h>
#include <linux/irqdomain.h>
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
/* IOMMU interface */
void iort_set_dma_mask(struct device *dev);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
#else
static inline void acpi_iort_init(void) { }
static inline bool iort_node_match(u8 type) { return false; }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{ return req_id; }
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
u32 req_id)
{ return NULL; }
/* IOMMU interface */
static inline void iort_set_dma_mask(struct device *dev) { }
static inline
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif
#define IORT_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)
#endif /* __ACPI_IORT_H__ */

View File

@ -71,6 +71,7 @@ struct cpu_cacheinfo {
struct cacheinfo *info_list;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
};
/*

View File

@ -62,6 +62,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp)
return filp->f_path.dentry->d_fsdata;
}
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
.owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
.llseek = generic_file_llseek, \
}
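With the macro visible regardless of CONFIG_DEBUG_FS, a driver can define an attribute unconditionally and hand it to debugfs_create_file_unsafe(), which these read/write helpers are typically paired with. A minimal sketch (my_reg is illustrative):
static u64 my_reg;

static int my_reg_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int my_reg_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(my_reg_fops, my_reg_get, my_reg_set, "%llu\n");

/* at init time: */
debugfs_create_file_unsafe("my_reg", 0644, parent, &my_reg, &my_reg_fops);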
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
@ -99,21 +114,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
.owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
.llseek = generic_file_llseek, \
}
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@ -233,8 +233,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
__releases(&debugfs_srcu)
{ }
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static const struct file_operations __fops = { 0 }
static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
return -ENODEV;
}
static inline ssize_t debugfs_attr_write(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
return -ENODEV;
}
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
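
For reference, a hypothetical consumer of the relocated macro (the attribute name and backing variable are invented). Attributes defined this way are meant to be registered with debugfs_create_file_unsafe(), since debugfs_attr_read()/debugfs_attr_write() already take the SRCU-based file protection themselves:

#include <linux/debugfs.h>

static u64 example_threshold;

static int example_threshold_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_threshold_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_threshold_fops, example_threshold_get,
			 example_threshold_set, "%llu\n");

static void example_debugfs_init(struct dentry *parent)
{
	debugfs_create_file_unsafe("threshold", 0600, parent,
				   &example_threshold,
				   &example_threshold_fops);
}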

View File

@ -707,6 +707,87 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
* @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
* @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
* @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
* @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
*/
enum device_link_state {
DL_STATE_NONE = -1,
DL_STATE_DORMANT = 0,
DL_STATE_AVAILABLE,
DL_STATE_CONSUMER_PROBE,
DL_STATE_ACTIVE,
DL_STATE_SUPPLIER_UNBIND,
};
/*
* Device link flags.
*
* STATELESS: The core won't track the presence of supplier/consumer drivers.
* AUTOREMOVE: Remove this link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE BIT(1)
#define DL_FLAG_PM_RUNTIME BIT(2)
#define DL_FLAG_RPM_ACTIVE BIT(3)
/**
* struct device_link - Device link representation.
* @supplier: The device on the supplier end of the link.
* @s_node: Hook to the supplier device's list of links to consumers.
* @consumer: The device on the consumer end of the link.
* @c_node: Hook to the consumer device's list of links to suppliers.
* @status: The state of the link (with respect to the presence of drivers).
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
*/
struct device_link {
struct device *supplier;
struct list_head s_node;
struct device *consumer;
struct list_head c_node;
enum device_link_state status;
u32 flags;
bool rpm_active;
#ifdef CONFIG_SRCU
struct rcu_head rcu_head;
#endif
};
/**
* enum dl_dev_state - Device driver presence tracking information.
* @DL_DEV_NO_DRIVER: There is no driver attached to the device.
* @DL_DEV_PROBING: A driver is probing.
* @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
* @DL_DEV_UNBINDING: The driver is unbinding from the device.
*/
enum dl_dev_state {
DL_DEV_NO_DRIVER = 0,
DL_DEV_PROBING,
DL_DEV_DRIVER_BOUND,
DL_DEV_UNBINDING,
};
/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
enum dl_dev_state status;
};
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@ -799,6 +880,7 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@ -1116,6 +1198,10 @@ extern void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
#ifdef CONFIG_PRINTK
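
To illustrate the new interface, a hedged sketch of a consumer driver probe creating a managed link to a supplier it depends on (both device pointers are assumed to have been resolved elsewhere). With DL_FLAG_AUTOREMOVE the core deletes the link when the consumer unbinds, so device_link_del() is only needed for links managed by hand:

#include <linux/device.h>

/* Hypothetical consumer probe; "supplier" was looked up earlier. */
static int example_consumer_probe(struct device *dev, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(dev, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EPROBE_DEFER;	/* or treat as a hard error */

	/* runtime PM on dev now keeps the supplier resumed as needed */
	return 0;
}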

View File

@ -61,6 +61,10 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
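
These two entry points let an IOMMU-backed dma_map_ops implementation service the generic dma_map_resource()/dma_unmap_resource() calls. A hypothetical client, e.g. a DMA engine driver mapping a peripheral FIFO register as a slave-transfer target, stays at the generic API and pairs the mapping with a matching dma_unmap_resource() on teardown:

#include <linux/dma-mapping.h>

/* Hypothetical: map a device FIFO register (assumed 32 bits wide) for DMA. */
static dma_addr_t example_map_fifo(struct device *dma_dev, phys_addr_t fifo_phys)
{
	dma_addr_t handle;

	handle = dma_map_resource(dma_dev, fifo_phys, sizeof(u32),
				  DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dma_dev, handle))
		return 0;	/* caller treats 0 as "not mapped" */

	return handle;
}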

View File

@ -17,8 +17,9 @@ enum fwnode_type {
FWNODE_OF,
FWNODE_ACPI,
FWNODE_ACPI_DATA,
FWNODE_ACPI_STATIC,
FWNODE_PDATA,
FWNODE_IRQCHIP,
FWNODE_IRQCHIP
};
struct fwnode_handle {

View File

@ -253,6 +253,7 @@ extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
@ -351,6 +352,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops);
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */
@ -580,6 +584,17 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
return -ENODEV;
}
static inline void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
}
static inline
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
{
return NULL;
}
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
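
A rough sketch of the intended pairing, with hypothetical names: an IOMMU driver publishes its ops against the fwnode that describes the instance, and firmware/bus code later retrieves those ops when wiring up a master device:

#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct iommu_ops example_iommu_ops;	/* hypothetical driver ops */

/* IOMMU driver probe: make this instance discoverable via its fwnode. */
static int example_iommu_probe(struct platform_device *pdev)
{
	iommu_register_instance(&pdev->dev.of_node->fwnode, &example_iommu_ops);
	return 0;
}

/* Bus/firmware code: resolve the ops behind a master's IOMMU fwnode. */
static const struct iommu_ops *example_lookup(struct fwnode_handle *iommu_fwnode)
{
	return iommu_get_instance(iommu_fwnode);
}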

View File

@ -31,8 +31,16 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
static inline void of_iommu_set_ops(struct device_node *np,
const struct iommu_ops *ops)
{
iommu_register_instance(&np->fwnode, ops);
}
static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
return iommu_get_instance(&np->fwnode);
}
extern struct of_device_id __iommu_of_table;

View File

@ -559,6 +559,7 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
@ -596,6 +597,7 @@ struct dev_pm_info {
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;

View File

@ -55,6 +55,11 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
@ -186,6 +191,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */

View File

@ -56,7 +56,7 @@ static const char *kobject_actions[] = {
* kobject_action_type - translate action string to numeric type
*
* @buf: buffer containing the action string, newline is ignored
* @len: length of buffer
* @count: length of buffer
* @type: pointer to the location to store the action type
*
* Returns 0 if the action string was recognized.
@ -154,8 +154,8 @@ static void cleanup_uevent_env(struct subprocess_info *info)
/**
* kobject_uevent_env - send an uevent with environmental data
*
* @action: action that is happening
* @kobj: struct kobject that the action is happening to
* @action: action that is happening
* @envp_ext: pointer to environmental data
*
* Returns 0 if kobject_uevent_env() is completed with success or the
@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
* kobject_uevent - notify userspace by sending an uevent
*
* @action: action that is happening
* @kobj: struct kobject that the action is happening to
* @action: action that is happening
*
* Returns 0 if kobject_uevent() is completed with success or the
* corresponding error when it fails.
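
For completeness, a small hypothetical caller showing the argument order these comments now document correctly (kobject first, action second):

#include <linux/kobject.h>

/* Hypothetical: tell user space that an object's state changed. */
static void example_notify_change(struct kobject *kobj, bool with_env)
{
	if (with_env) {
		char *envp[] = { "REASON=threshold", NULL };

		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
	} else {
		kobject_uevent(kobj, KOBJ_CHANGE);
	}
}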