/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include "iio.h"
#include "trigger_consumer.h"

#define IIO_ID_PREFIX "device"
#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"

/* IDR to assign each registered device a unique id */
static DEFINE_IDR(iio_idr);
/* IDR to allocate character device minor numbers */
static DEFINE_IDR(iio_chrdev_idr);
/* Lock used to protect both of the above */
static DEFINE_SPINLOCK(iio_idr_lock);

dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

void __iio_change_event(struct iio_detected_event_list *ev,
			int ev_code,
			s64 timestamp)
{
	ev->ev.id = ev_code;
	ev->ev.timestamp = timestamp;
}
EXPORT_SYMBOL(__iio_change_event);

/* Used both in the interrupt line put events and the ring buffer ones */

/* Note that in its current form someone has to be listening before events
 * are queued. Hence a client MUST open the chrdev before the ring buffer is
 * switched on.
 */
int __iio_push_event(struct iio_event_interface *ev_int,
		     int ev_code,
		     s64 timestamp,
		     struct iio_shared_ev_pointer *shared_pointer_p)
{
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;
		ev->shared_pointer = shared_pointer_p;
		if (ev->shared_pointer)
			shared_pointer_p->ev_p = ev;
		list_add_tail(&ev->list, &ev_int->det_events.list);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(__iio_push_event);

int iio_push_event(struct iio_dev *dev_info,
		   int ev_line,
		   int ev_code,
		   s64 timestamp)
{
	return __iio_push_event(&dev_info->event_interfaces[ev_line],
				ev_code, timestamp, NULL);
}
EXPORT_SYMBOL(iio_push_event);
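
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * threshold event path would typically time-stamp and queue an event on a
 * given interrupt line.  The handler name and event code below are
 * hypothetical:
 *
 *	static void foo_thresh_event(struct iio_dev *indio_dev)
 *	{
 *		iio_push_event(indio_dev, 0,
 *			       IIO_EVENT_CODE_FOO_THRESH,
 *			       iio_get_time_ns());
 *	}
 */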

/* Generic interrupt line interrupt handler */
static irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
{
	struct iio_interrupt *int_info = _int_info;
	struct iio_dev *dev_info = int_info->dev_info;
	struct iio_event_handler_list *p;
	s64 time_ns;
	unsigned long flags;

	spin_lock_irqsave(&int_info->ev_list_lock, flags);
	if (list_empty(&int_info->ev_list)) {
		spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
		return IRQ_NONE;
	}

	time_ns = iio_get_time_ns();
	/* detect single element list */
	if (list_is_singular(&int_info->ev_list)) {
		disable_irq_nosync(irq);
		p = list_first_entry(&int_info->ev_list,
				     struct iio_event_handler_list,
				     list);
		/* single event handler - maybe shared */
		p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
	} else
		list_for_each_entry(p, &int_info->ev_list, list) {
			disable_irq_nosync(irq);
			p->handler(dev_info, 1, time_ns, 0);
		}
	spin_unlock_irqrestore(&int_info->ev_list_lock, flags);

	return IRQ_HANDLED;
}

static struct iio_interrupt *iio_allocate_interrupt(void)
{
	struct iio_interrupt *i = kmalloc(sizeof *i, GFP_KERNEL);

	if (i) {
		spin_lock_init(&i->ev_list_lock);
		INIT_LIST_HEAD(&i->ev_list);
	}
	return i;
}

/* Confirming the validity of supplied irq is left to drivers. */
int iio_register_interrupt_line(unsigned int irq,
				struct iio_dev *dev_info,
				int line_number,
				unsigned long type,
				const char *name)
{
	int ret;

	dev_info->interrupts[line_number] = iio_allocate_interrupt();
	if (dev_info->interrupts[line_number] == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	dev_info->interrupts[line_number]->line_number = line_number;
	dev_info->interrupts[line_number]->irq = irq;
	dev_info->interrupts[line_number]->dev_info = dev_info;

	/* Possibly only request on demand?
	 * Can see this may complicate the handling of interrupts.
	 * However, with this approach we might end up handling lots of
	 * events no-one cares about. */
	ret = request_irq(irq,
			  &iio_interrupt_handler,
			  type,
			  name,
			  dev_info->interrupts[line_number]);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_register_interrupt_line);
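
/*
 * Usage sketch (illustrative only): a driver with a single device interrupt
 * might register it from probe() roughly as follows; the spi device, flags
 * and label are assumptions, not taken from this file:
 *
 *	ret = iio_register_interrupt_line(spi->irq, indio_dev, 0,
 *					  IRQF_TRIGGER_RISING,
 *					  "foo_event");
 *	if (ret)
 *		goto error_unregister_device;
 */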

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
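
/*
 * Usage sketch (illustrative only): drivers normally pair this reader with
 * a fixed string via the IIO_CONST_ATTR() helper from the iio sysfs
 * headers, e.g. a hypothetical list of available scales:
 *
 *	IIO_CONST_ATTR(in_scale_available, "0.5 1 2");
 *
 * which creates a read-only attribute whose show() is iio_read_const_attr
 * and which returns "0.5 1 2\n".
 */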

/* Before this runs the interrupt generator must have been disabled */
void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
{
	/* make sure the interrupt handlers are all done */
	flush_scheduled_work();
	free_irq(dev_info->interrupts[line_number]->irq,
		 dev_info->interrupts[line_number]);
	kfree(dev_info->interrupts[line_number]);
}
EXPORT_SYMBOL(iio_unregister_interrupt_line);

/* Reference counted add and remove */
void iio_add_event_to_list(struct iio_event_handler_list *el,
			   struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	/* take mutex to protect this element */
	mutex_lock(&el->exist_lock);
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_add(&el->list, head);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	el->refcount++;
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_add_event_to_list);

void iio_remove_event_from_list(struct iio_event_handler_list *el,
				struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	mutex_lock(&el->exist_lock);
	el->refcount--;
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_del_init(&el->list);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_remove_event_from_list);
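
/*
 * Usage sketch (illustrative only): a driver's event-enable attribute might
 * take and drop a reference on its handler so a shared handler stays on the
 * interrupt line's list only while at least one user wants it.  The state
 * structure and handler below are hypothetical:
 *
 *	iio_add_event_to_list(&st->thresh_handler,
 *			      &indio_dev->interrupts[0]->ev_list);
 *	...
 *	iio_remove_event_from_list(&st->thresh_handler,
 *				   &indio_dev->interrupts[0]->ev_list);
 */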

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events.list)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int->det_events.list));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events.list,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	/*
	 * Possible concurrency issue if an update of this event is on its way
	 * through. May lead to a new event being removed whilst the reported
	 * event was the unescalated one. In the typical use case this is not
	 * a problem, as userspace will, say, read half the buffer in response
	 * to a 50% full event, which would make the correct 100% full event
	 * incorrect anyway.
	 */
	if (el->shared_pointer) {
		spin_lock(&el->shared_pointer->lock);
		(el->shared_pointer->ev_p) = NULL;
		spin_unlock(&el->shared_pointer->lock);
	}
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:
	return ret;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
		list_del(&el->list);
		kfree(el);
	}
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
		fops_put(filep->f_op);
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	filep->private_data = hand->private;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.open = iio_event_chrdev_open,
	.owner = THIS_MODULE,
};

static void iio_event_dev_release(struct device *dev)
{
	struct iio_event_interface *ev_int
		= container_of(dev, struct iio_event_interface, dev);

	cdev_del(&ev_int->handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_event_type = {
	.release = iio_event_dev_release,
};

int iio_device_get_chrdev_minor(void)
{
	int ret, val;

idr_again:
	if (unlikely(idr_pre_get(&iio_chrdev_idr, GFP_KERNEL) == 0))
		return -ENOMEM;
	spin_lock(&iio_idr_lock);
	ret = idr_get_new(&iio_chrdev_idr, NULL, &val);
	spin_unlock(&iio_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (unlikely(ret))
		return ret;
	if (val > IIO_DEV_MAX) {
		/* Out of minors: drop the idr entry we just took */
		iio_device_free_chrdev_minor(val);
		return -ENOMEM;
	}
	return val;
}

void iio_device_free_chrdev_minor(int val)
{
	spin_lock(&iio_idr_lock);
	idr_remove(&iio_chrdev_idr, val);
	spin_unlock(&iio_idr_lock);
}

int iio_setup_ev_int(struct iio_event_interface *ev_int,
		     const char *name,
		     struct module *owner,
		     struct device *dev)
{
	int ret, minor;

	ev_int->dev.bus = &iio_bus_type;
	ev_int->dev.parent = dev;
	ev_int->dev.type = &iio_event_type;
	device_initialize(&ev_int->dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
	dev_set_name(&ev_int->dev, "%s", name);

	ret = device_add(&ev_int->dev);
	if (ret)
		goto error_free_minor;

	cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
	ev_int->handler.chrdev.owner = owner;

	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events.list);
	init_waitqueue_head(&ev_int->wait);
	ev_int->handler.private = ev_int;
	ev_int->handler.flags = 0;

	ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
	if (ret)
		goto error_unreg_device;

	return 0;

error_unreg_device:
	device_unregister(&ev_int->dev);
error_free_minor:
	iio_device_free_chrdev_minor(minor);
error_device_put:
	put_device(&ev_int->dev);

	return ret;
}

void iio_free_ev_int(struct iio_event_interface *ev_int)
{
	device_unregister(&ev_int->dev);
	put_device(&ev_int->dev);
}

static int __init iio_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);

	return err;
}

static void __exit iio_dev_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
}

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not register bus type\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = iio_dev_init();
	if (ret < 0)
		goto error_unregister_bus_type;

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	iio_dev_exit();
	bus_unregister(&iio_bus_type);
}

static int iio_device_register_sysfs(struct iio_dev *dev_info)
{
	int ret = 0;

	ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs hooks\n");
		goto error_ret;
	}

	if (dev_info->scan_el_attrs) {
		ret = sysfs_create_group(&dev_info->dev.kobj,
					 dev_info->scan_el_attrs);
		if (ret)
			dev_err(&dev_info->dev,
				"Failed to add sysfs scan els\n");
	}

error_ret:
	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
{
	if (dev_info->scan_el_attrs)
		sysfs_remove_group(&dev_info->dev.kobj,
				   dev_info->scan_el_attrs);
	sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
}

/* Return a negative errno on failure */
int iio_get_new_idr_val(struct idr *this_idr)
{
	int ret;
	int val;

idr_again:
	if (unlikely(idr_pre_get(this_idr, GFP_KERNEL) == 0))
		return -ENOMEM;
	spin_lock(&iio_idr_lock);
	ret = idr_get_new(this_idr, NULL, &val);
	spin_unlock(&iio_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (unlikely(ret))
		return ret;

	return val;
}
EXPORT_SYMBOL(iio_get_new_idr_val);

void iio_free_idr_val(struct idr *this_idr, int id)
{
	spin_lock(&iio_idr_lock);
	idr_remove(this_idr, id);
	spin_unlock(&iio_idr_lock);
}
EXPORT_SYMBOL(iio_free_idr_val);

static int iio_device_register_id(struct iio_dev *dev_info,
				  struct idr *this_idr)
{
	/* Use the idr passed in rather than hardcoding iio_idr */
	dev_info->id = iio_get_new_idr_val(this_idr);
	if (dev_info->id < 0)
		return dev_info->id;

	return 0;
}

static void iio_device_unregister_id(struct iio_dev *dev_info)
{
	iio_free_idr_val(&iio_idr, dev_info->id);
}

static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
{
	int ret;
	/* p for adding, q for removing */
	struct attribute **attrp, **attrq;

	if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
		attrp = dev_info->event_conf_attrs[i].attrs;
		while (*attrp) {
			ret = sysfs_add_file_to_group(&dev_info->dev.kobj,
						      *attrp,
						      dev_info->event_attrs[i].name);
			if (ret)
				goto error_ret;
			attrp++;
		}
	}

	return 0;

error_ret:
	attrq = dev_info->event_conf_attrs[i].attrs;
	while (attrq != attrp) {
		sysfs_remove_file_from_group(&dev_info->dev.kobj,
					     *attrq,
					     dev_info->event_attrs[i].name);
		attrq++;
	}

	return ret;
}

static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
						  int i)
{
	struct attribute **attrq;

	if (dev_info->event_conf_attrs
	    && dev_info->event_conf_attrs[i].attrs) {
		attrq = dev_info->event_conf_attrs[i].attrs;
		while (*attrq) {
			sysfs_remove_file_from_group(&dev_info->dev.kobj,
						     *attrq,
						     dev_info->event_attrs[i].name);
			attrq++;
		}
	}

	return 0;
}

static int iio_device_register_eventset(struct iio_dev *dev_info)
{
	int ret = 0, i, j;

	if (dev_info->num_interrupt_lines == 0)
		return 0;

	dev_info->event_interfaces =
		kzalloc(sizeof(struct iio_event_interface)
			* dev_info->num_interrupt_lines,
			GFP_KERNEL);
	if (dev_info->event_interfaces == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
				       * dev_info->num_interrupt_lines,
				       GFP_KERNEL);
	if (dev_info->interrupts == NULL) {
		ret = -ENOMEM;
		goto error_free_event_interfaces;
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		dev_info->event_interfaces[i].owner = dev_info->driver_module;
		snprintf(dev_info->event_interfaces[i]._name, 20,
			 "%s:event%d",
			 dev_name(&dev_info->dev),
			 i);

		ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
				       (const char *)(dev_info->event_interfaces[i]._name),
				       dev_info->driver_module,
				       &dev_info->dev);
		if (ret) {
			dev_err(&dev_info->dev,
				"Could not get chrdev interface\n");
			goto error_free_setup_ev_ints;
		}

		dev_set_drvdata(&dev_info->event_interfaces[i].dev,
				(void *)dev_info);
		ret = sysfs_create_group(&dev_info->event_interfaces[i].dev.kobj,
					 &dev_info->event_attrs[i]);
		if (ret) {
			dev_err(&dev_info->dev,
				"Failed to register sysfs for event attrs\n");
			goto error_remove_sysfs_interfaces;
		}
	}
	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		ret = __iio_add_event_config_attrs(dev_info, i);
		if (ret)
			goto error_unregister_config_attrs;
	}

	return 0;

error_unregister_config_attrs:
	/* Unwind the lines already done; index with j, not i */
	for (j = 0; j < i; j++)
		__iio_remove_event_config_attrs(dev_info, j);
	i = dev_info->num_interrupt_lines - 1;
error_remove_sysfs_interfaces:
	for (j = 0; j < i; j++)
		sysfs_remove_group(&dev_info->event_interfaces[j].dev.kobj,
				   &dev_info->event_attrs[j]);
error_free_setup_ev_ints:
	for (j = 0; j < i; j++)
		iio_free_ev_int(&dev_info->event_interfaces[j]);
	kfree(dev_info->interrupts);
error_free_event_interfaces:
	kfree(dev_info->event_interfaces);
error_ret:
	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *dev_info)
{
	int i;

	if (dev_info->num_interrupt_lines == 0)
		return;
	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		sysfs_remove_group(&dev_info->event_interfaces[i].dev.kobj,
				   &dev_info->event_attrs[i]);
	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		iio_free_ev_int(&dev_info->event_interfaces[i]);
	kfree(dev_info->interrupts);
	kfree(dev_info->event_interfaces);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev = to_iio_dev(device);

	iio_put();
	kfree(dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

struct iio_dev *iio_allocate_device(void)
{
	struct iio_dev *dev = kzalloc(sizeof *dev, GFP_KERNEL);

	if (dev) {
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		iio_get();
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev)
		iio_put_device(dev);
}
EXPORT_SYMBOL(iio_free_device);

int iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	ret = iio_device_register_id(dev_info, &iio_idr);
	if (ret) {
		dev_err(&dev_info->dev, "Failed to get id\n");
		goto error_ret;
	}
	dev_set_name(&dev_info->dev, "device%d", dev_info->id);

	ret = device_add(&dev_info->dev);
	if (ret)
		goto error_free_idr;
	ret = iio_device_register_sysfs(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_del_device;
	}
	ret = iio_device_register_eventset(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_register_trigger_consumer(dev_info);

	return 0;

error_free_sysfs:
	iio_device_unregister_sysfs(dev_info);
error_del_device:
	device_del(&dev_info->dev);
error_free_idr:
	iio_device_unregister_id(dev_info);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
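
/*
 * Usage sketch (illustrative only): the expected driver life cycle around
 * these calls, with error handling elided.  The parent device and attribute
 * group are hypothetical:
 *
 *	indio_dev = iio_allocate_device();
 *	indio_dev->dev.parent = &spi->dev;
 *	indio_dev->attrs = &foo_attribute_group;
 *	indio_dev->driver_module = THIS_MODULE;
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *	ret = iio_device_register(indio_dev);
 *
 * with iio_device_unregister() then iio_free_device() on remove.
 */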

void iio_device_unregister(struct iio_dev *dev_info)
{
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_unregister_trigger_consumer(dev_info);
	iio_device_unregister_eventset(dev_info);
	iio_device_unregister_sysfs(dev_info);
	iio_device_unregister_id(dev_info);
	device_unregister(&dev_info->dev);
}
EXPORT_SYMBOL(iio_device_unregister);

void iio_put(void)
{
	module_put(THIS_MODULE);
}

void iio_get(void)
{
	__module_get(THIS_MODULE);
}

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");