drm/amdgpu: add lock for interval tree in vm
Change-Id: I62b892a22af37b32e6b4aefca80a25cf45426ed2
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
commit c25867dfab
parent 1c16c0a7b2
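The VA interval tree at vm->va is walked and modified from several paths (mapping, unmapping and teardown of BO VAs). As the hunks below show, this patch serializes every interval_tree_iter_first(), interval_tree_insert() and interval_tree_remove() on that tree behind a new per-VM spinlock, it_lock.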
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -954,6 +954,8 @@ struct amdgpu_vm {
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	/* for interval tree */
+	spinlock_t		it_lock;
 };
 
 struct amdgpu_vm_manager {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1028,7 +1028,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+	spin_lock(&vm->it_lock);
 	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+	spin_unlock(&vm->it_lock);
 	if (it) {
 		struct amdgpu_bo_va_mapping *tmp;
 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1055,7 +1057,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->flags = flags;
 
 	list_add(&mapping->list, &bo_va->invalids);
+	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	/* Make sure the page tables are allocated */
@@ -1101,7 +1105,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 error_free:
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
@@ -1151,7 +1157,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	}
 
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
 	if (valid)
@@ -1187,13 +1195,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		list_add(&mapping->list, &vm->freed);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
 
@@ -1248,7 +1260,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
-
+	spin_lock_init(&vm->it_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1312,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		unsigned id = vm->ids[i].id;
 
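As an illustration for readers outside the kernel tree, here is a minimal userspace sketch of the same pattern. The toy singly linked interval list stands in for the kernel's rbtree-backed interval tree, and pthread_spinlock_t stands in for spinlock_t; all toy_* names are invented for this sketch and do not appear in the patch.

/*
 * Sketch of the pattern this patch applies: every lookup, insert and
 * remove on a shared interval structure is wrapped in a spinlock.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct interval {
	unsigned long start, last;   /* inclusive range */
	struct interval *next;
};

struct toy_vm {
	struct interval *va;         /* head of the interval list */
	pthread_spinlock_t it_lock;  /* protects 'va', like vm->it_lock */
};

/* Return the first interval overlapping [start, last], or NULL. */
static struct interval *toy_iter_first(struct toy_vm *vm,
				       unsigned long start, unsigned long last)
{
	struct interval *it;

	pthread_spin_lock(&vm->it_lock);
	for (it = vm->va; it; it = it->next)
		if (it->start <= last && start <= it->last)
			break;
	pthread_spin_unlock(&vm->it_lock);
	return it;
}

static void toy_insert(struct toy_vm *vm, struct interval *node)
{
	pthread_spin_lock(&vm->it_lock);
	node->next = vm->va;
	vm->va = node;
	pthread_spin_unlock(&vm->it_lock);
}

int main(void)
{
	struct toy_vm vm = { .va = NULL };
	struct interval a = { .start = 0, .last = 15 };

	pthread_spin_init(&vm.it_lock, PTHREAD_PROCESS_PRIVATE);
	toy_insert(&vm, &a);
	printf("overlap found: %s\n",
	       toy_iter_first(&vm, 8, 24) ? "yes" : "no");
	pthread_spin_destroy(&vm.it_lock);
	return 0;
}

A spinlock suits this pattern because the tree operations are short, bounded and never sleep, so the critical sections stay cheap even on hot mapping paths.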