drm/nouveau: prepare for enabling svm with existing userspace interfaces
For a channel to make use of SVM features, it requires a different GPU MMU configuration than we would normally use, and it is not desirable to switch to that configuration unless a client is actively going to use SVM.

In order to support SVM without more extensive changes to the userspace interfaces, the SVM_INIT ioctl needs to replace the previous configuration safely. The only way we can currently do this safely, accounting for some unlikely failure conditions, is to allocate the new VMM without destroying the last one, and to prioritise the SVM-enabled configuration in the code that cares. This will get cleaned up again further down the track.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit bfe91afaca
parent a261a20c01
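Every hunk below applies the same selection rule: code that previously reached unconditionally for the client's default VMM (&cli->vmm) now prefers the SVM-enabled one whenever SVM_INIT has set it up. As a minimal sketch of that rule (illustrative only; the commit open-codes the ternary at each site rather than adding a helper, so the function name here is hypothetical):

        /* Hypothetical helper, not part of this commit: cli->svm.cli is
         * only non-NULL once the SVM_INIT ioctl has allocated the
         * SVM-enabled VMM alongside the original one, so it doubles as
         * the "is SVM active?" test.
         */
        static inline struct nouveau_vmm *
        nouveau_cli_vmm(struct nouveau_cli *cli)
        {
                return cli->svm.cli ? &cli->svm : &cli->vmm;
        }

Channels latch the result once at creation time (chan->vmm in nouveau_channel_prep()), so per-channel paths such as pushbuf validation can consult chan->vmm directly instead of re-testing the client.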
@@ -339,7 +339,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
                 goto done;
 
         if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-                ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
+                ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
+                                      &chan->ntfy_vma);
                 if (ret)
                         goto done;
         }
@@ -194,7 +194,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
         struct nouveau_drm *drm = cli->drm;
         struct nouveau_bo *nvbo;
         struct nvif_mmu *mmu = &cli->mmu;
-        struct nvif_vmm *vmm = &cli->vmm.vmm;
+        struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
         size_t acc_size;
         int type = ttm_bo_type_device;
         int ret, i, pi = -1;
@@ -130,6 +130,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 
         chan->device = device;
         chan->drm = drm;
+        chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         atomic_set(&chan->killed, 0);
 
         /* allocate memory for dma push buffer */
@@ -157,7 +158,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
         chan->push.addr = chan->push.buffer->bo.offset;
 
         if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-                ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+                ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
                                       &chan->push.vma);
                 if (ret) {
                         nouveau_channel_del(pchan);
@@ -172,7 +173,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                 args.target = NV_DMA_V0_TARGET_VM;
                 args.access = NV_DMA_V0_ACCESS_VM;
                 args.start = 0;
-                args.limit = cli->vmm.vmm.limit - 1;
+                args.limit = chan->vmm->vmm.limit - 1;
         } else
         if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
                 if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -202,7 +203,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                         args.target = NV_DMA_V0_TARGET_VM;
                         args.access = NV_DMA_V0_ACCESS_RDWR;
                         args.start = 0;
-                        args.limit = cli->vmm.vmm.limit - 1;
+                        args.limit = chan->vmm->vmm.limit - 1;
                 }
         }
 
@@ -220,7 +221,6 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                     u64 runlist, bool priv, struct nouveau_channel **pchan)
 {
-        struct nouveau_cli *cli = (void *)device->object.client;
         static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
                                         VOLTA_CHANNEL_GPFIFO_A,
                                         PASCAL_CHANNEL_GPFIFO_A,
@@ -255,7 +255,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                 args.volta.ilength = 0x02000;
                 args.volta.ioffset = 0x10000 + chan->push.addr;
                 args.volta.runlist = runlist;
-                args.volta.vmm = nvif_handle(&cli->vmm.vmm.object);
+                args.volta.vmm = nvif_handle(&chan->vmm->vmm.object);
                 args.volta.priv = priv;
                 size = sizeof(args.volta);
         } else
@@ -264,7 +264,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                 args.kepler.ilength = 0x02000;
                 args.kepler.ioffset = 0x10000 + chan->push.addr;
                 args.kepler.runlist = runlist;
-                args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
+                args.kepler.vmm = nvif_handle(&chan->vmm->vmm.object);
                 args.kepler.priv = priv;
                 size = sizeof(args.kepler);
         } else
@@ -272,14 +272,14 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                 args.fermi.version = 0;
                 args.fermi.ilength = 0x02000;
                 args.fermi.ioffset = 0x10000 + chan->push.addr;
-                args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
+                args.fermi.vmm = nvif_handle(&chan->vmm->vmm.object);
                 size = sizeof(args.fermi);
         } else {
                 args.nv50.version = 0;
                 args.nv50.ilength = 0x02000;
                 args.nv50.ioffset = 0x10000 + chan->push.addr;
                 args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
-                args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
+                args.nv50.vmm = nvif_handle(&chan->vmm->vmm.object);
                 size = sizeof(args.nv50);
         }
 
@@ -350,7 +350,6 @@ static int
 nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 {
         struct nvif_device *device = chan->device;
-        struct nouveau_cli *cli = (void *)chan->user.client;
         struct nouveau_drm *drm = chan->drm;
         struct nv_dma_v0 args = {};
         int ret, i;
@@ -376,7 +375,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
                 args.target = NV_DMA_V0_TARGET_VM;
                 args.access = NV_DMA_V0_ACCESS_VM;
                 args.start = 0;
-                args.limit = cli->vmm.vmm.limit - 1;
+                args.limit = chan->vmm->vmm.limit - 1;
         } else {
                 args.target = NV_DMA_V0_TARGET_VRAM;
                 args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -393,7 +392,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
                         args.target = NV_DMA_V0_TARGET_VM;
                         args.access = NV_DMA_V0_ACCESS_VM;
                         args.start = 0;
-                        args.limit = cli->vmm.vmm.limit - 1;
+                        args.limit = chan->vmm->vmm.limit - 1;
                 } else
                 if (chan->drm->agp.bridge) {
                         args.target = NV_DMA_V0_TARGET_AGP;
@@ -405,7 +404,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
                         args.target = NV_DMA_V0_TARGET_VM;
                         args.access = NV_DMA_V0_ACCESS_RDWR;
                         args.start = 0;
-                        args.limit = cli->vmm.vmm.limit - 1;
+                        args.limit = chan->vmm->vmm.limit - 1;
                 }
 
                 ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
@@ -8,6 +8,7 @@ struct nvif_device;
 struct nouveau_channel {
         struct nvif_device *device;
         struct nouveau_drm *drm;
+        struct nouveau_vmm *vmm;
 
         int chid;
         u64 inst;
@@ -172,6 +172,7 @@ nouveau_cli_fini(struct nouveau_cli *cli)
         WARN_ON(!list_empty(&cli->worker));
 
         usif_client_fini(cli);
+        nouveau_vmm_fini(&cli->svm);
         nouveau_vmm_fini(&cli->vmm);
         nvif_mmu_fini(&cli->mmu);
         nvif_device_fini(&cli->device);
@@ -96,6 +96,7 @@ struct nouveau_cli {
         struct nvif_device device;
         struct nvif_mmu mmu;
         struct nouveau_vmm vmm;
+        struct nouveau_vmm svm;
         const struct nvif_mclass *mem;
 
         struct list_head head;
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
         chan = nouveau_nofbaccel ? NULL : drm->channel;
         if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-                ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
+                ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
                 if (ret) {
                         NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
                         chan = NULL;
@@ -68,10 +68,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
         struct nouveau_bo *nvbo = nouveau_gem_object(gem);
         struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct device *dev = drm->dev->dev;
+        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         struct nouveau_vma *vma;
         int ret;
 
-        if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                 return 0;
 
         ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -82,7 +83,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
         if (ret < 0 && ret != -EACCES)
                 goto out;
 
-        ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+        ret = nouveau_vma_new(nvbo, vmm, &vma);
         pm_runtime_mark_last_busy(dev);
         pm_runtime_put_autosuspend(dev);
 out:
@@ -142,17 +143,18 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
         struct nouveau_bo *nvbo = nouveau_gem_object(gem);
         struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct device *dev = drm->dev->dev;
+        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         struct nouveau_vma *vma;
         int ret;
 
-        if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                 return;
 
         ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
         if (ret)
                 return;
 
-        vma = nouveau_vma_find(nvbo, &cli->vmm);
+        vma = nouveau_vma_find(nvbo, vmm);
         if (vma) {
                 if (--vma->refs == 0) {
                         ret = pm_runtime_get_sync(dev);
@@ -219,6 +221,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 {
         struct nouveau_cli *cli = nouveau_cli(file_priv);
         struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
         struct nouveau_vma *vma;
 
         if (is_power_of_2(nvbo->valid_domains))
@@ -228,8 +231,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
         else
                 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
         rep->offset = nvbo->bo.offset;
-        if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-                vma = nouveau_vma_find(nvbo, &cli->vmm);
+        if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+                vma = nouveau_vma_find(nvbo, vmm);
                 if (!vma)
                         return -EINVAL;
 
@@ -321,7 +324,8 @@ struct validate_op {
 };
 
 static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
+                        struct nouveau_fence *fence,
                         struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
         struct nouveau_bo *nvbo;
@@ -332,13 +336,11 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
                 b = &pbbo[nvbo->pbbo_index];
 
                 if (likely(fence)) {
-                        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-                        struct nouveau_vma *vma;
-
                         nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
-                        if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-                                vma = (void *)(unsigned long)b->user_priv;
+                        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+                                struct nouveau_vma *vma =
+                                        (void *)(unsigned long)b->user_priv;
                                 nouveau_fence_unref(&vma->fence);
                                 dma_fence_get(&fence->base);
                                 vma->fence = fence;
@@ -358,10 +360,11 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
 }
 
 static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini(struct validate_op *op, struct nouveau_channel *chan,
+              struct nouveau_fence *fence,
               struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
-        validate_fini_no_ticket(op, fence, pbbo);
+        validate_fini_no_ticket(op, chan, fence, pbbo);
         ww_acquire_fini(&op->ticket);
 }
 
@@ -416,7 +419,7 @@ retry:
                         list_splice_tail_init(&vram_list, &op->list);
                         list_splice_tail_init(&gart_list, &op->list);
                         list_splice_tail_init(&both_list, &op->list);
-                        validate_fini_no_ticket(op, NULL, NULL);
+                        validate_fini_no_ticket(op, chan, NULL, NULL);
                         if (unlikely(ret == -EDEADLK)) {
                                 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                               &op->ticket);
@@ -430,8 +433,8 @@ retry:
                         }
                 }
 
-                if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-                        struct nouveau_vmm *vmm = &cli->vmm;
+                if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+                        struct nouveau_vmm *vmm = chan->vmm;
                         struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
                         if (!vma) {
                                 NV_PRINTK(err, cli, "vma not found!\n");
@@ -471,7 +474,7 @@ retry:
         list_splice_tail(&gart_list, &op->list);
         list_splice_tail(&both_list, &op->list);
         if (ret)
-                validate_fini(op, NULL, NULL);
+                validate_fini(op, chan, NULL, NULL);
         return ret;
 
 }
@@ -563,7 +566,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
         if (unlikely(ret < 0)) {
                 if (ret != -ERESTARTSYS)
                         NV_PRINTK(err, cli, "validating bo list\n");
-                validate_fini(op, NULL, NULL);
+                validate_fini(op, chan, NULL, NULL);
                 return ret;
         }
         *apply_relocs = ret;
@@ -842,7 +845,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
         }
 
 out:
-        validate_fini(&op, fence, bo);
+        validate_fini(&op, chan, fence, bo);
         nouveau_fence_unref(&fence);
 
 out_prevalid:
@@ -109,7 +109,6 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-        struct nouveau_cli *cli = (void *)chan->user.client;
         struct nv84_fence_priv *priv = chan->drm->fence;
         struct nv84_fence_chan *fctx;
         int ret;
@@ -127,7 +126,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
         fctx->base.sequence = nv84_fence_read(chan);
 
         mutex_lock(&priv->mutex);
-        ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
+        ret = nouveau_vma_new(priv->bo, chan->vmm, &fctx->vma);
         mutex_unlock(&priv->mutex);
 
         if (ret)