drm/nv50-nvc0: unmap buffers from the vm when they're evicted

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2011-02-10 11:22:12 +10:00
parent a4154bbffd
commit 3425df486c
3 changed files with 36 additions and 7 deletions
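
In outline, the change below works like this: before the M2MF copy that migrates a buffer out of VRAM, nouveau_bo_move_m2mf() creates a temporary read-only VMA for the old VRAM node, the nv50/nvc0 copy paths read the source through that temporary mapping instead of the buffer's long-lived VMA, nouveau_bo_move_ntfy() drops the long-lived mapping once the buffer no longer resides in VRAM, and the VRAM manager's del() hook tears the temporary mapping down after TTM frees the old ttm_mem_reg. The fragment below is only a condensed sketch of that lifecycle, assembled from the hunks that follow (it is not compilable on its own; error handling, locking and the pre-NV50 paths are omitted):

	/* 1. nouveau_bo_move_m2mf(): give the old VRAM node a temporary,
	 *    read-only GPU virtual mapping for the duration of the copy.
	 */
	struct nouveau_vram *node = old_mem->mm_node;
	ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
			     nvbo->vma.node->type, NV_MEM_ACCESS_RO,
			     &node->tmp_vma);
	if (ret == 0)
		nouveau_vm_map(&node->tmp_vma, node);

	/* 2. nv50_bo_move_m2mf()/nvc0_bo_move_m2mf(): source the copy from
	 *    the temporary mapping instead of nvbo->vma.
	 */
	src_offset = node->tmp_vma.offset;

	/* 3. nouveau_bo_move_ntfy(): the buffer has left VRAM, so unmap its
	 *    long-lived VMA.
	 */
	nouveau_vm_unmap(&nvbo->vma);

	/* 4. nouveau_vram_manager_del(): once TTM destroys the old
	 *    ttm_mem_reg, unmap and release the temporary VMA.
	 */
	if (node->tmp_vma.node) {
		nouveau_vm_unmap(&node->tmp_vma);
		nouveau_vm_put(&node->tmp_vma);
	}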

drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -508,10 +508,12 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	int ret;
 
 	src_offset = old_mem->start << PAGE_SHIFT;
-	if (old_mem->mem_type == TTM_PL_VRAM)
-		src_offset = nvbo->vma.offset;
-	else
+	if (old_mem->mem_type == TTM_PL_VRAM) {
+		struct nouveau_vram *node = old_mem->mm_node;
+		src_offset = node->tmp_vma.offset;
+	} else {
 		src_offset += dev_priv->gart_info.aper_base;
+	}
 
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (new_mem->mem_type == TTM_PL_VRAM)
@@ -559,10 +561,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	int ret;
 
 	src_offset = old_mem->start << PAGE_SHIFT;
-	if (old_mem->mem_type == TTM_PL_VRAM)
-		src_offset = nvbo->vma.offset;
-	else
+	if (old_mem->mem_type == TTM_PL_VRAM) {
+		struct nouveau_vram *node = old_mem->mm_node;
+		src_offset = node->tmp_vma.offset;
+	} else {
 		src_offset += dev_priv->gart_info.aper_base;
+	}
 
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (new_mem->mem_type == TTM_PL_VRAM)
@@ -711,6 +715,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct nouveau_channel *chan;
 	int ret;
@@ -720,6 +725,21 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
 
+	/* create temporary vma for old memory, this will get cleaned
+	 * up after ttm destroys the ttm_mem_reg
+	 */
+	if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) {
+		struct nouveau_vram *node = old_mem->mm_node;
+
+		ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
+				     nvbo->vma.node->type, NV_MEM_ACCESS_RO,
+				     &node->tmp_vma);
+		if (ret)
+			goto out;
+
+		nouveau_vm_map(&node->tmp_vma, node);
+	}
+
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
@@ -733,6 +753,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 						    no_wait_gpu, new_mem);
 	}
 
+out:
 	if (chan == dev_priv->channel)
 		mutex_unlock(&chan->mutex);
 	return ret;
@@ -811,7 +832,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (dev_priv->card_type < NV_50 || nvbo->no_vm)
+	if (dev_priv->card_type < NV_50)
 		return;
 
 	switch (new_mem->mem_type) {
@@ -820,6 +841,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 		break;
 	case TTM_PL_TT:
 	default:
+		nouveau_vm_unmap(&nvbo->vma);
 		break;
 	}
 }

drivers/gpu/drm/nouveau/nouveau_mm.h

@@ -69,6 +69,7 @@ struct nouveau_vram {
 	struct drm_device *dev;
 
 	struct nouveau_vma bar_vma;
+	struct nouveau_vma tmp_vma;
 	u8 page_shift;
 
 	struct list_head regions;

drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -710,8 +710,14 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
 	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nouveau_vram *node = mem->mm_node;
 	struct drm_device *dev = dev_priv->dev;
 
+	if (node->tmp_vma.node) {
+		nouveau_vm_unmap(&node->tmp_vma);
+		nouveau_vm_put(&node->tmp_vma);
+	}
+
 	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
 }
 
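
Note that nouveau_vram_manager_del() runs for every VRAM node TTM releases, not only those that were just migrated; the check on node->tmp_vma.node means nodes that never had a temporary VMA created simply skip the unmap/put pair.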