staging: IIO: Fix uses of spinlocks prior to init in ring implementations

Some confusion was caused by __iio_init_ring_buffer and its equivalent
in ring_sw handling both the init of spin locks etc. and the allocation
of the actual buffer.  This resulted in ring->use_lock being taken
before it was initialized, and indeed being held across the
re-initialization itself.

Some of the recent cleanups in the spin lock code seem to have triggered
the bug, now causing traceable crashes.

The following patch should fix this but hasn't been extensively tested
as of yet and there may well be some side effects I haven't thought of.
Just wanted to get this out there before anyone else runs into it!

Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit 6f2dfb3101 (parent 3e18951955)
Jonathan Cameron, 2010-03-02 13:35:35 +0000, committed by Greg Kroah-Hartman
3 changed files with 16 additions and 12 deletions
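
As a rough illustration of the locking problem described above, here is a
minimal userspace C sketch.  This is not the driver code itself: pthread
mutexes stand in for the kernel spinlock, and the struct and function names
are invented for the example.

#include <pthread.h>
#include <stdlib.h>

struct sw_ring {
	pthread_mutex_t use_lock;	/* stand-in for spinlock_t use_lock */
	char *data;			/* stand-in for the ring storage */
};

/* Old-style helper: lock init and buffer allocation bundled together. */
static int ring_init_old(struct sw_ring *r, size_t bytes)
{
	pthread_mutex_init(&r->use_lock, NULL);	/* spin_lock_init() analogue */
	r->data = malloc(bytes);
	return r->data ? 0 : -1;
}

/* Update path: takes the lock, then re-runs the combined init under it. */
static int ring_request_update_old(struct sw_ring *r, size_t bytes)
{
	int ret;

	pthread_mutex_lock(&r->use_lock);	/* may never have been initialised */
	free(r->data);
	ret = ring_init_old(r, bytes);		/* re-initialises a lock that is held */
	pthread_mutex_unlock(&r->use_lock);
	return ret;
}

On the very first update the lock is taken before anything has initialised
it, and every subsequent update re-initialises a lock that is currently held.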

@@ -266,6 +266,8 @@ void iio_ring_buffer_init(struct iio_ring_buffer *ring,
 	ring->indio_dev = dev_info;
 	ring->ev_int.private = ring;
 	ring->access_handler.private = ring;
+	ring->shared_ev_pointer.ev_p = 0;
+	spin_lock_init(&ring->shared_ev_pointer.lock);
 }
 EXPORT_SYMBOL(iio_ring_buffer_init);

@@ -134,19 +134,17 @@ void iio_ring_buffer_init(struct iio_ring_buffer *ring,
 			  struct iio_dev *dev_info);
 /**
- * __iio_init_ring_buffer() - initialize common elements of ring buffers
+ * __iio_update_ring_buffer() - update common elements of ring buffers
  * @ring: ring buffer that is the event source
  * @bytes_per_datum: size of individual datum including timestamp
  * @length: number of datums in ring
  **/
-static inline void __iio_init_ring_buffer(struct iio_ring_buffer *ring,
+static inline void __iio_update_ring_buffer(struct iio_ring_buffer *ring,
 					  int bytes_per_datum, int length)
 {
 	ring->bpd = bytes_per_datum;
 	ring->length = length;
 	ring->loopcount = 0;
-	ring->shared_ev_pointer.ev_p = 0;
-	spin_lock_init(&ring->shared_ev_pointer.lock);
 }
 /**

@@ -14,14 +14,12 @@
 #include <linux/workqueue.h>
 #include "ring_sw.h"
-static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
+static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
 						int bytes_per_datum, int length)
 {
 	if ((length == 0) || (bytes_per_datum == 0))
 		return -EINVAL;
-	__iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
-	spin_lock_init(&ring->use_lock);
+	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
 	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
 	ring->read_p = 0;
 	ring->write_p = 0;
@@ -30,6 +28,11 @@ static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
 	return ring->data ? 0 : -ENOMEM;
 }
+static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
+{
+	spin_lock_init(&ring->use_lock);
+}
+
 static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
 {
 	kfree(ring->data);
@@ -320,7 +323,8 @@ int iio_request_update_sw_rb(struct iio_ring_buffer *r)
 		goto error_ret;
 	}
 	__iio_free_sw_ring_buffer(ring);
-	ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
+	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
+					    ring->buf.length);
 error_ret:
 	spin_unlock(&ring->use_lock);
 	return ret;
@@ -411,8 +415,8 @@ struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
 	if (!ring)
 		return 0;
 	buf = &ring->buf;
 	iio_ring_buffer_init(buf, indio_dev);
+	__iio_init_sw_ring_buffer(ring);
 	buf->dev.type = &iio_sw_ring_type;
 	device_initialize(&buf->dev);
 	buf->dev.parent = &indio_dev->dev;
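
For contrast, a sketch of the split this patch introduces, under the same
invented userspace stand-ins: the lock is initialised exactly once when the
ring is created, and the helper that runs under the lock only touches the
buffer.

#include <pthread.h>
#include <stdlib.h>

struct sw_ring {
	pthread_mutex_t use_lock;	/* stand-in for spinlock_t use_lock */
	char *data;
	size_t length;
};

/* One-time init: done at allocation, before the lock can be contended. */
static void ring_init(struct sw_ring *r)
{
	pthread_mutex_init(&r->use_lock, NULL);
	r->data = NULL;
	r->length = 0;
}

/* (Re)allocation only; safe to call with use_lock held. */
static int ring_allocate(struct sw_ring *r, size_t length)
{
	r->data = malloc(length);
	r->length = length;
	return r->data ? 0 : -1;
}

static int ring_request_update(struct sw_ring *r, size_t length)
{
	int ret;

	pthread_mutex_lock(&r->use_lock);
	free(r->data);
	ret = ring_allocate(r, length);		/* lock stays valid throughout */
	pthread_mutex_unlock(&r->use_lock);
	return ret;
}

This mirrors the rename in the patch: __iio_init_sw_ring_buffer now does only
the one-time lock setup at allocation time, while __iio_allocate_sw_ring_buffer
handles the (re)allocation that iio_request_update_sw_rb performs under
use_lock.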