dma-buf: Use seqlock_t instead of disabling preemption

The "dma reservation" code disables preemption while acquiring write
access to the "seqcount".

Replace the seqcount with a seqlock_t, which provides seqcount-like
semantics plus a lock for the writer.

Link: https://lkml.kernel.org/r/f410b429-db86-f81c-7c67-f563fa808b62@free.fr
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
This commit is contained in:
Sebastian Andrzej Siewior 2019-08-14 16:38:43 +02:00 committed by Alibek Omarov
parent 88199aa670
commit e96faa4aee
5 changed files with 27 additions and 42 deletions

View File

@ -227,7 +227,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
return 0;
retry:
seq = read_seqcount_begin(&resv->seq);
seq = read_seqbegin(&resv->seq);
rcu_read_lock();
fobj = rcu_dereference(resv->fence);
@ -236,7 +236,7 @@ retry:
else
shared_count = 0;
fence_excl = rcu_dereference(resv->fence_excl);
if (read_seqcount_retry(&resv->seq, seq)) {
if (read_seqretry(&resv->seq, seq)) {
rcu_read_unlock();
goto retry;
}
@ -1206,12 +1206,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
robj = buf_obj->resv;
while (true) {
seq = read_seqcount_begin(&robj->seq);
seq = read_seqbegin(&robj->seq);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
shared_count = fobj ? fobj->shared_count : 0;
fence = rcu_dereference(robj->fence_excl);
if (!read_seqcount_retry(&robj->seq, seq))
if (!read_seqretry(&robj->seq, seq))
break;
rcu_read_unlock();
}

View File

@ -49,12 +49,6 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
@ -103,8 +97,7 @@ void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
__seqcount_init(&obj->seq, reservation_seqcount_string,
&reservation_seqcount_class);
seqlock_init(&obj->seq);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
}
@ -234,8 +227,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
preempt_disable();
write_seqcount_begin(&obj->seq);
write_seqlock(&obj->seq);
for (i = 0; i < count; ++i) {
@ -255,8 +247,7 @@ replace:
/* pointer update must be visible before we extend the shared_count */
smp_store_mb(fobj->shared_count, count);
write_seqcount_end(&obj->seq);
preempt_enable();
write_sequnlock(&obj->seq);
dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
@ -283,14 +274,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
if (fence)
dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
write_seqlock(&obj->seq);
/* write_seqlock provides the necessary memory barrier */
RCU_INIT_POINTER(obj->fence_excl, fence);
if (old)
old->shared_count = 0;
write_seqcount_end(&obj->seq);
preempt_enable();
write_sequnlock(&obj->seq);
/* inplace update, no shared fences */
while (i--)
@ -368,13 +357,11 @@ retry:
src_list = dma_resv_get_list(dst);
old = dma_resv_get_excl(dst);
preempt_disable();
write_seqcount_begin(&dst->seq);
/* write_seqcount_begin provides the necessary memory barrier */
write_seqlock(&dst->seq);
/* write_seqlock provides the necessary memory barrier */
RCU_INIT_POINTER(dst->fence_excl, new);
RCU_INIT_POINTER(dst->fence, dst_list);
write_seqcount_end(&dst->seq);
preempt_enable();
write_sequnlock(&dst->seq);
dma_resv_list_free(src_list);
dma_fence_put(old);
@ -414,7 +401,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
shared_count = i = 0;
rcu_read_lock();
seq = read_seqcount_begin(&obj->seq);
seq = read_seqbegin(&obj->seq);
fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl && !dma_fence_get_rcu(fence_excl))
@ -456,7 +443,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
}
}
if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
if (i != shared_count || read_seqretry(&obj->seq, seq)) {
while (i--)
dma_fence_put(shared[i]);
dma_fence_put(fence_excl);
@ -507,7 +494,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
retry:
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
seq = read_seqbegin(&obj->seq);
rcu_read_lock();
i = -1;
@ -553,7 +540,7 @@ retry:
rcu_read_unlock();
if (fence) {
if (read_seqcount_retry(&obj->seq, seq)) {
if (read_seqretry(&obj->seq, seq)) {
dma_fence_put(fence);
goto retry;
}
@ -607,7 +594,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
retry:
ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
seq = read_seqbegin(&obj->seq);
if (test_all) {
unsigned i;
@ -627,7 +614,7 @@ retry:
break;
}
if (read_seqcount_retry(&obj->seq, seq))
if (read_seqretry(&obj->seq, seq))
goto retry;
}
@ -639,7 +626,7 @@ retry:
if (ret < 0)
goto retry;
if (read_seqcount_retry(&obj->seq, seq))
if (read_seqretry(&obj->seq, seq))
goto retry;
}
}

View File

@ -246,11 +246,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
new->shared_count = k;
/* Install the new fence list, seqcount provides the barriers */
preempt_disable();
write_seqcount_begin(&resv->seq);
write_seqlock(&resv->seq);
RCU_INIT_POINTER(resv->fence, new);
write_seqcount_end(&resv->seq);
preempt_enable();
write_sequnlock(&resv->seq);
/* Drop the references to the removed fences or move them to ef_list */
for (i = j, k = 0; i < old->shared_count; ++i) {

View File

@ -75,7 +75,6 @@ busy_check_writer(const struct dma_fence *fence)
return __busy_set_if_active(fence, __busy_write_id);
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@ -110,7 +109,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*
*/
retry:
seq = raw_read_seqcount(&obj->base.resv->seq);
/* XXX raw_read_seqcount() does not wait for the WRITE to finish */
seq = read_seqbegin(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
args->busy =
@ -129,7 +129,7 @@ retry:
}
}
if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
if (args->busy && read_seqretry(&obj->base.resv->seq, seq))
goto retry;
err = 0;

View File

@ -65,13 +65,13 @@ struct dma_resv_list {
/**
* struct dma_resv - a reservation object manages fences for a buffer
* @lock: update side lock
* @seq: sequence count for managing RCU read-side synchronization
* @seq: sequence lock for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
*/
struct dma_resv {
struct ww_mutex lock;
seqcount_t seq;
seqlock_t seq;
struct dma_fence __rcu *fence_excl;
struct dma_resv_list __rcu *fence;