/* linux/drivers/staging/greybus/bundle.c */

/*
* Greybus bundles
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include "greybus.h"
static void gb_bundle_connections_exit(struct gb_bundle *bundle);
static int gb_bundle_connections_init(struct gb_bundle *bundle);
/*
 * sysfs "device_id" attribute: report the device id assigned to this
 * bundle (0xff until gb_bundle_init() binds a real one).
 *
 * sysfs show() output is expected to be newline-terminated; the
 * original "%d" format omitted the '\n'.
 */
static ssize_t device_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "%d\n", bundle->device_id);
}
static DEVICE_ATTR_RO(device_id);
/* sysfs attributes exposed by every bundle device; NULL-terminated. */
static struct attribute *bundle_attrs[] = {
	&dev_attr_device_id.attr,
	NULL,
};
/* Generates bundle_groups for use as bundle->dev.groups below. */
ATTRIBUTE_GROUPS(bundle);
/*
 * Driver-core release callback: invoked when the last reference to a
 * bundle's embedded struct device is dropped; frees the containing
 * gb_bundle.
 */
static void gb_bundle_release(struct device *dev)
{
	kfree(to_gb_bundle(dev));
}
/* Device type shared by all bundle devices registered on the greybus bus. */
struct device_type greybus_bundle_type = {
	.name = "greybus_bundle",
	.release = gb_bundle_release,
};
/* XXX This could be per-host device or per-module */
/* Protects the per-interface-block bundle lists (gb_ib->bundles). */
static DEFINE_SPINLOCK(gb_bundles_lock);
/*
 * Create a gb_bundle structure to represent a discovered
 * bundle. Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion or
 * device registration failure.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface_block *gb_ib, u8 interface_id)
{
	struct gb_bundle *bundle;
	int retval;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->gb_ib = gb_ib;
	bundle->id = interface_id;
	bundle->device_id = 0xff;	/* Invalid device id to start with */
	INIT_LIST_HEAD(&bundle->connections);

	/* Build up the bundle device structures and register it with the
	 * driver core */
	bundle->dev.parent = &gb_ib->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%d:%d", gb_ib->module_id, interface_id);

	retval = device_add(&bundle->dev);
	if (retval) {
		pr_err("failed to add bundle device for id 0x%02hhx\n",
		       interface_id);
		/*
		 * device_initialize() took a reference; dropping it runs
		 * gb_bundle_release(), which kfree()s the bundle.  An
		 * additional kfree(bundle) here would be a double free.
		 */
		put_device(&bundle->dev);
		return NULL;
	}

	spin_lock_irq(&gb_bundles_lock);
	list_add_tail(&bundle->links, &gb_ib->bundles);
	spin_unlock_irq(&gb_bundles_lock);

	return bundle;
}
/*
* Tear down a previously set up bundle.
*/
void gb_bundle_destroy(struct gb_interface_block *gb_ib)
{
struct gb_bundle *bundle;
struct gb_bundle *temp;
if (WARN_ON(!gb_ib))
return;
spin_lock_irq(&gb_bundles_lock);
list_for_each_entry_safe(bundle, temp, &gb_ib->bundles, links) {
list_del(&bundle->links);
gb_bundle_connections_exit(bundle);
device_del(&bundle->dev);
}
spin_unlock_irq(&gb_bundles_lock);
}
/*
 * Bind a device id to the bundle identified by bundle_id on this
 * interface block, program the SVC route for it, and initialize all of
 * its connections.  Returns 0 on success or a negative errno.
 */
int gb_bundle_init(struct gb_interface_block *gb_ib, u8 bundle_id, u8 device_id)
{
	struct device *parent = gb_ib->hd->parent;
	struct gb_bundle *bundle;
	int retval;

	bundle = gb_bundle_find(gb_ib, bundle_id);
	if (!bundle) {
		dev_err(parent, "bundle %hhu not found\n",
			bundle_id);
		return -ENOENT;
	}

	bundle->device_id = device_id;

	retval = svc_set_route_send(bundle, gb_ib->hd);
	if (retval) {
		dev_err(parent, "failed to set route (%d)\n", retval);
		return retval;
	}

	retval = gb_bundle_connections_init(bundle);
	if (!retval)
		return 0;

	dev_err(parent, "interface bundle init error %d\n",
		retval);
	/* XXX clear route */
	return retval;
}
/*
 * Look up the bundle with the given id on an interface block.
 * Returns the bundle, or NULL if no bundle with that id exists.
 */
struct gb_bundle *gb_bundle_find(struct gb_interface_block *gb_ib, u8 bundle_id)
{
	struct gb_bundle *found = NULL;
	struct gb_bundle *iter;

	spin_lock_irq(&gb_bundles_lock);
	list_for_each_entry(iter, &gb_ib->bundles, links) {
		if (iter->id == bundle_id) {
			found = iter;
			break;
		}
	}
	spin_unlock_irq(&gb_bundles_lock);

	return found;
}
/*
 * Initialize every connection on a bundle.  Stops at the first
 * connection whose init fails and returns its error; returns 0 when
 * all connections initialize (or the bundle has none).
 */
static int gb_bundle_connections_init(struct gb_bundle *bundle)
{
	struct gb_connection *connection;
	int retval;

	list_for_each_entry(connection, &bundle->connections, bundle_links) {
		retval = gb_connection_init(connection);
		if (retval)
			return retval;
	}

	return 0;
}
/*
 * Shut down and destroy every connection on a bundle.  The _safe
 * iterator is required because gb_connection_destroy() removes the
 * entry from the list.
 */
static void gb_bundle_connections_exit(struct gb_bundle *bundle)
{
	struct gb_connection *connection;
	struct gb_connection *tmp;

	list_for_each_entry_safe(connection, tmp, &bundle->connections,
				 bundle_links) {
		gb_connection_exit(connection);
		gb_connection_destroy(connection);
	}
}