Merge branch 'drm-fixes-4.4' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Radeon and amdgpu fixes for 4.4.  A bit more than usual since I missed
last week.  Misc fixes all over the place.  The big changes are the
tiling configuration fixes for Fiji.

* 'drm-fixes-4.4' of git://people.freedesktop.org/~agd5f/linux: (35 commits)
  drm/amdgpu: reserve/unreserve objects out of map/unmap operations
  drm/amdgpu: move bo_reserve out of amdgpu_vm_clear_bo
  drm/amdgpu: add lock for interval tree in vm
  drm/amdgpu: keep the owner for VMIDs
  drm/amdgpu: move VM manager clean into the VM code again
  drm/amdgpu: cleanup VM coding style
  drm/amdgpu: remove unused VM manager field
  drm/amdgpu: cleanup scheduler command submission
  drm/amdgpu: fix typo in firmware name
  drm/amdgpu: remove the unnecessary parameter adev for amdgpu_sa_bo_new()
  drm/amdgpu: wait interruptible when semaphores are disabled v2
  drm/amdgpu: update pd while updating vm as well
  drm/amdgpu: fix handling order in scheduler CS
  drm/amdgpu: fix incorrect mutex usage v3
  drm/amdgpu: cleanup scheduler fence get/put dance
  drm/amdgpu: add command submission workflow tracepoint
  drm/amdgpu: update Fiji's tiling mode table
  drm/amdgpu: fix bug that can't enter thermal interrupt for bonaire.
  drm/amdgpu: fix seq_printf format string
  drm/radeon: fix quirk for MSI R7 370 Armor 2X
  ...
Dave Airlie 2015-11-19 13:15:17 +10:00
commit 8ed59fd6d4
24 changed files with 730 additions and 402 deletions


@ -389,7 +389,6 @@ struct amdgpu_clock {
* Fences.
*/
struct amdgpu_fence_driver {
struct amdgpu_ring *ring;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct delayed_work lockup_work;
struct timer_list fallback_timer;
wait_queue_head_t fence_queue;
};
@ -917,8 +916,8 @@ struct amdgpu_ring {
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
struct amdgpu_bo *bo;
uint64_t addr;
};
struct amdgpu_vm_id {
@ -926,8 +925,6 @@ struct amdgpu_vm_id {
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
/* last use of vmid */
struct fence *last_id_use;
};
struct amdgpu_vm {
@ -957,24 +954,70 @@ struct amdgpu_vm {
/* for id and flush management per ring */
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
/* for interval tree */
spinlock_t it_lock;
};
struct amdgpu_vm_manager {
struct fence *active[AMDGPU_NUM_VM];
uint32_t max_pfn;
struct {
struct fence *active;
atomic_long_t owner;
} ids[AMDGPU_NUM_VM];
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
unsigned nvm;
/* vram base address for page table entry */
u64 vram_base_offset;
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
/* for hw to save the PD addr on suspend/resume */
uint32_t saved_table_addr[AMDGPU_NUM_VM];
bool enabled;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct amdgpu_ring *vm_pte_funcs_ring;
};
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *head);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
struct fence *updates);
void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct fence *fence);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
int amdgpu_vm_free_job(struct amdgpu_job *job);
/*
* context related structures
*/
@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser {
/* relocations */
struct amdgpu_bo_list_entry *vm_bos;
struct list_head validated;
struct fence *fence;
struct amdgpu_ib *ibs;
uint32_t num_ibs;
@ -1226,7 +1270,7 @@ struct amdgpu_job {
struct amdgpu_device *adev;
struct amdgpu_ib *ibs;
uint32_t num_ibs;
struct mutex job_lock;
void *owner;
struct amdgpu_user_fence uf;
int (*free_job)(struct amdgpu_job *job);
};
@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct amdgpu_ctx *ctx,
struct amdgpu_ib *ibs,
uint32_t num_ibs);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@ -2318,49 +2357,6 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/*
* vm
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *head);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
struct fence *updates);
void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_fence *fence);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
int amdgpu_vm_free_job(struct amdgpu_job *job);
/*
* functions used by amdgpu_encoder.c
*/


@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct amdgpu_ctx *ctx,
struct amdgpu_ib *ibs,
uint32_t num_ibs)
{
struct amdgpu_cs_parser *parser;
int i;
parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
if (!parser)
return NULL;
parser->adev = adev;
parser->filp = filp;
parser->ctx = ctx;
parser->ibs = ibs;
parser->num_ibs = num_ibs;
for (i = 0; i < num_ibs; i++)
ibs[i].ctx = ctx;
return parser;
}
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
*
* If error is set, unvalidate the buffers; otherwise just free the memory
* used by the parsing context.
**/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
unsigned i;
if (!error) {
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
&parser->ibs[parser->num_ibs-1].fence->base);
&parser->validated,
parser->fence);
} else if (backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
}
fence_put(parser->fence);
static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
{
unsigned i;
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
if (parser->bo_list)
@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
if (!amdgpu_enable_scheduler)
{
if (parser->ibs)
for (i = 0; i < parser->num_ibs; i++)
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
}
kfree(parser);
}
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
*
* If error is set, unvalidate the buffers; otherwise just free the memory
* used by the parsing context.
**/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
amdgpu_cs_parser_fini_early(parser, error, backoff);
amdgpu_cs_parser_fini_late(parser);
if (parser->ibs)
for (i = 0; i < parser->num_ibs; i++)
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
r = amdgpu_bo_vm_update_pte(parser, vm);
if (r) {
goto out;
}
amdgpu_cs_sync_rings(parser);
if (!amdgpu_enable_scheduler)
r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
parser->filp);
if (!r)
amdgpu_cs_sync_rings(parser);
out:
return r;
}
@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_cs *cs = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_cs_parser *parser;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
int i, r;
if (!adev->accel_working)
return -EBUSY;
parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
if (!parser)
return -ENOMEM;
r = amdgpu_cs_parser_init(parser, data);
parser.adev = adev;
parser.filp = filp;
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
amdgpu_cs_parser_fini(parser, r, false);
amdgpu_cs_parser_fini(&parser, r, false);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
mutex_lock(&vm->mutex);
r = amdgpu_cs_parser_relocs(parser);
r = amdgpu_cs_parser_relocs(&parser);
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
else if (!r) {
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, parser);
r = amdgpu_cs_ib_fill(adev, &parser);
}
if (!r) {
r = amdgpu_cs_dependencies(adev, parser);
r = amdgpu_cs_dependencies(adev, &parser);
if (r)
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
}
@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r)
goto out;
for (i = 0; i < parser->num_ibs; i++)
trace_amdgpu_cs(parser, i);
for (i = 0; i < parser.num_ibs; i++)
trace_amdgpu_cs(&parser, i);
r = amdgpu_cs_ib_vm_chunk(adev, parser);
r = amdgpu_cs_ib_vm_chunk(adev, &parser);
if (r)
goto out;
if (amdgpu_enable_scheduler && parser->num_ibs) {
if (amdgpu_enable_scheduler && parser.num_ibs) {
struct amdgpu_ring * ring = parser.ibs->ring;
struct amd_sched_fence *fence;
struct amdgpu_job *job;
struct amdgpu_ring * ring = parser->ibs->ring;
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job) {
r = -ENOMEM;
goto out;
}
job->base.sched = &ring->sched;
job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
job->adev = parser->adev;
job->ibs = parser->ibs;
job->num_ibs = parser->num_ibs;
job->base.owner = parser->filp;
mutex_init(&job->job_lock);
job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
job->adev = parser.adev;
job->owner = parser.filp;
job->free_job = amdgpu_cs_free_job;
job->ibs = parser.ibs;
job->num_ibs = parser.num_ibs;
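/* the job owns the IBs from here on; clear the parser's pointers so they are not freed twice */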
parser.ibs = NULL;
parser.num_ibs = 0;
if (job->ibs[job->num_ibs - 1].user) {
memcpy(&job->uf, &parser->uf,
sizeof(struct amdgpu_user_fence));
job->uf = parser.uf;
job->ibs[job->num_ibs - 1].user = &job->uf;
parser.uf.bo = NULL;
}
job->free_job = amdgpu_cs_free_job;
mutex_lock(&job->job_lock);
r = amd_sched_entity_push_job(&job->base);
if (r) {
mutex_unlock(&job->job_lock);
fence = amd_sched_fence_create(job->base.s_entity,
parser.filp);
if (!fence) {
r = -ENOMEM;
amdgpu_cs_free_job(job);
kfree(job);
goto out;
}
cs->out.handle =
amdgpu_ctx_add_fence(parser->ctx, ring,
&job->base.s_fence->base);
parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
job->base.s_fence = fence;
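/* keep an extra reference for parser_fini to attach to the reserved buffers */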
parser.fence = fence_get(&fence->base);
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
&job->base.s_fence->base);
cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
&fence->base);
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
mutex_unlock(&job->job_lock);
amdgpu_cs_parser_fini_late(parser);
mutex_unlock(&vm->mutex);
return 0;
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
} else {
struct amdgpu_fence *fence;
r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
parser.filp);
fence = parser.ibs[parser.num_ibs - 1].fence;
parser.fence = fence_get(&fence->base);
cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
}
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
amdgpu_cs_parser_fini(parser, r, reserved_buffers);
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
mutex_unlock(&vm->mutex);
r = amdgpu_cs_handle_lockup(adev, r);
return r;


@ -47,6 +47,9 @@
* that the relevant GPU caches have been flushed.
*/
static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
/**
* amdgpu_fence_write - write a fence value
*
@ -84,24 +87,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
/**
* amdgpu_fence_schedule_check - schedule lockup check
*
* @ring: pointer to struct amdgpu_ring
*
* Queues a delayed work item to check for lockups.
*/
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
/*
* Do not reset the timer here with mod_delayed_work,
* this can livelock in an interaction with TTM delayed destroy.
*/
queue_delayed_work(system_power_efficient_wq,
&ring->fence_drv.lockup_work,
AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
struct amdgpu_device *adev = ring->adev;
/* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
@ -132,10 +117,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
(*fence)->seq,
AMDGPU_FENCE_FLAG_INT);
trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
return 0;
}
/**
* amdgpu_fence_schedule_fallback - schedule fallback check
*
* @ring: pointer to struct amdgpu_ring
*
* Start a timer as fallback to our interrupts.
*/
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
mod_timer(&ring->fence_drv.fallback_timer,
jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
* amdgpu_fence_activity - check for fence activity
*
@ -202,32 +199,11 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
if (seq < last_emitted)
amdgpu_fence_schedule_check(ring);
amdgpu_fence_schedule_fallback(ring);
return wake;
}
/**
* amdgpu_fence_check_lockup - check for hardware lockup
*
* @work: delayed work item
*
* Checks for fence activity and, if there is none, probes
* the hardware to determine whether a lockup occurred.
*/
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
struct amdgpu_fence_driver *fence_drv;
struct amdgpu_ring *ring;
fence_drv = container_of(work, struct amdgpu_fence_driver,
lockup_work.work);
ring = fence_drv->ring;
if (amdgpu_fence_activity(ring))
wake_up_all(&ring->fence_drv.fence_queue);
}
/**
* amdgpu_fence_process - process a fence
*
@ -243,6 +219,20 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
wake_up_all(&ring->fence_drv.fence_queue);
}
/**
* amdgpu_fence_fallback - fallback for hardware interrupts
*
* @arg: the ring pointer, passed as an unsigned long
*
* Checks for fence activity.
*/
static void amdgpu_fence_fallback(unsigned long arg)
{
struct amdgpu_ring *ring = (void *)arg;
amdgpu_fence_process(ring);
}
/**
* amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
*
@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
return 0;
amdgpu_fence_schedule_check(ring);
amdgpu_fence_schedule_fallback(ring);
wait_event(ring->fence_drv.fence_queue, (
(signaled = amdgpu_fence_seq_signaled(ring, seq))));
@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
atomic64_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
amdgpu_fence_check_lockup);
ring->fence_drv.ring = ring;
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
(unsigned long)ring);
init_waitqueue_head(&ring->fence_drv.fence_queue);
@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
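/* the fence slab is shared by all devices; a refcount lets the last one destroy it */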
if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
amdgpu_fence_slab = kmem_cache_create(
"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!amdgpu_fence_slab)
return -ENOMEM;
}
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
int i, r;
if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
kmem_cache_destroy(amdgpu_fence_slab);
mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
r = amdgpu_fence_wait_empty(ring);
@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
fence->fence_wake.func = amdgpu_fence_check_signaled;
__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
fence_get(f);
amdgpu_fence_schedule_check(ring);
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
static void amdgpu_fence_release(struct fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
const struct fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
.signaled = amdgpu_fence_is_signaled,
.wait = fence_default_wait,
.release = NULL,
.release = amdgpu_fence_release,
};
/*


@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
goto error_unreserve;
r = amdgpu_vm_clear_freed(adev, bo_va->vm);
if (r)
@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *rbo;
struct amdgpu_bo_va *bo_va;
struct ttm_validate_buffer tv, tv_pd;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
int r = 0;
@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
mutex_lock(&fpriv->vm.mutex);
rbo = gem_to_amdgpu_bo(gobj);
r = amdgpu_bo_reserve(rbo, false);
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &rbo->tbo;
tv.shared = true;
list_add(&tv.head, &list);
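/* a MAP operation may allocate page tables, so the page directory must be reserved as well */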
if (args->operation == AMDGPU_VA_OP_MAP) {
tv_pd.bo = &fpriv->vm.page_directory->tbo;
tv_pd.shared = true;
list_add(&tv_pd.head, &list);
}
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
mutex_unlock(&fpriv->vm.mutex);
drm_gem_object_unreference_unlocked(gobj);
@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
amdgpu_bo_unreserve(rbo);
ttm_eu_backoff_reservation(&ticket, &list);
drm_gem_object_unreference_unlocked(gobj);
mutex_unlock(&fpriv->vm.mutex);
return -ENOENT;
}
@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
ttm_eu_backoff_reservation(&ticket, &list);
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
mutex_unlock(&fpriv->vm.mutex);


@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
int r;
if (size) {
r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
&ib->sa_bo, size, 256);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
}
if (ib->vm)
amdgpu_vm_fence(adev, ib->vm, ib->fence);
amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
amdgpu_ring_unlock_commit(ring);
return 0;


@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
struct fence *fence);


@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
return false;
}
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{


@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
mutex_lock(&job->job_lock);
r = amdgpu_ib_schedule(job->adev,
job->num_ibs,
job->ibs,
job->base.owner);
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err;
@ -61,8 +59,6 @@ err:
if (job->free_job)
job->free_job(job);
mutex_unlock(&job->job_lock);
fence_put(&job->base.s_fence->base);
kfree(job);
return fence ? &fence->base : NULL;
}
@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
return -ENOMEM;
job->base.sched = &ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
if (!job->base.s_fence) {
kfree(job);
return -ENOMEM;
}
*f = fence_get(&job->base.s_fence->base);
job->adev = adev;
job->ibs = ibs;
job->num_ibs = num_ibs;
job->base.owner = owner;
mutex_init(&job->job_lock);
job->owner = owner;
job->free_job = free_job;
mutex_lock(&job->job_lock);
r = amd_sched_entity_push_job(&job->base);
if (r) {
mutex_unlock(&job->job_lock);
kfree(job);
return r;
}
*f = fence_get(&job->base.s_fence->base);
mutex_unlock(&job->job_lock);
amd_sched_entity_push_job(&job->base);
} else {
r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
if (r)


@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
if (*semaphore == NULL) {
return -ENOMEM;
}
r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);


@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
return -EINVAL;
}
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
(count >= AMDGPU_NUM_SYNCS)) {
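/* with the scheduler enabled or semaphores disabled, wait on the CPU and allow the wait to be interrupted */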
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
r = fence_wait(&fence->base, true);
if (r)
return r;
continue;
}
if (count >= AMDGPU_NUM_SYNCS) {
/* not enough room, wait manually */
r = fence_wait(&fence->base, false);
if (r)


@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
__entry->fences)
);
TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
__field(struct fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
TP_fast_assign(
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base;
__entry->ring_name = job->ibs[0].ring->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
__entry->adev, __entry->sched_job, __entry->ib,
__entry->fence, __entry->ring_name, __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
__field(struct fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
TP_fast_assign(
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base;
__entry->ring_name = job->ibs[0].ring->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
__entry->adev, __entry->sched_job, __entry->ib,
__entry->fence, __entry->ring_name, __entry->num_ibs)
);
TRACE_EVENT(amdgpu_vm_grab_id,
TP_PROTO(unsigned vmid, int ring),
TP_ARGS(vmid, ring),
@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
);
DECLARE_EVENT_CLASS(amdgpu_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(int, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->ring = ring;
__entry->seqno = seqno;
),
TP_printk("dev=%u, ring=%d, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno)
);
DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
TP_PROTO(int ring, struct amdgpu_semaphore *sem),


@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
ret = drm_mm_dump_table(m, mm);
spin_unlock(&glob->lru_lock);
if (ttm_pl == TTM_PL_VRAM)
seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
adev->mman.bdev.man[ttm_pl].size,
atomic64_read(&adev->vram_usage) >> 20,
atomic64_read(&adev->vram_vis_usage) >> 20);
(u64)atomic64_read(&adev->vram_usage) >> 20,
(u64)atomic64_read(&adev->vram_vis_usage) >> 20);
return ret;
}


@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i;
/* check if the id is still valid */
if (vm_id->id && vm_id->last_id_use &&
vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
return 0;
if (vm_id->id) {
unsigned id = vm_id->id;
long owner;
owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
if (owner == (long)vm) {
trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
return 0;
}
}
/* we definitely need to flush */
@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.nvm; ++i) {
struct fence *fence = adev->vm_manager.active[i];
struct fence *fence = adev->vm_manager.ids[i].active;
struct amdgpu_ring *fring;
if (fence == NULL) {
@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (choices[i]) {
struct fence *fence;
fence = adev->vm_manager.active[choices[i]];
fence = adev->vm_manager.ids[choices[i]].active;
vm_id->id = choices[i];
trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
struct fence *flushed_updates = vm_id->flushed_updates;
bool is_earlier = false;
bool is_later;
if (flushed_updates && updates) {
BUG_ON(flushed_updates->context != updates->context);
is_earlier = (updates->seqno - flushed_updates->seqno <=
INT_MAX) ? true : false;
}
if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
is_earlier) {
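/* check whether the pending updates fence is newer than the last one flushed */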
if (!flushed_updates)
is_later = true;
else if (!updates)
is_later = false;
else
is_later = fence_is_later(updates, flushed_updates);
if (pd_addr != vm_id->pd_gpu_addr || is_later) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
if (is_earlier) {
if (is_later) {
vm_id->flushed_updates = fence_get(updates);
fence_put(flushed_updates);
}
if (!flushed_updates)
vm_id->flushed_updates = fence_get(updates);
vm_id->pd_gpu_addr = pd_addr;
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
}
@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
*/
void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_fence *fence)
struct fence *fence)
{
unsigned ridx = fence->ring->idx;
unsigned vm_id = vm->ids[ridx].id;
struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
unsigned vm_id = vm->ids[ring->idx].id;
fence_put(adev->vm_manager.active[vm_id]);
adev->vm_manager.active[vm_id] = fence_get(&fence->base);
fence_put(vm->ids[ridx].last_id_use);
vm->ids[ridx].last_id_use = fence_get(&fence->base);
fence_put(adev->vm_manager.ids[vm_id].active);
adev->vm_manager.ids[vm_id].active = fence_get(fence);
atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
}
/**
@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
*
* @adev: amdgpu_device pointer
* @bo: bo to clear
*
* The bo must be reserved before calling this function.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
uint64_t addr;
int r;
r = amdgpu_bo_reserve(bo, false);
if (r)
return r;
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
return r;
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto error_unreserve;
goto error;
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
goto error_unreserve;
goto error;
r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
if (r)
@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (!r)
amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
if (amdgpu_enable_scheduler) {
amdgpu_bo_unreserve(bo);
if (amdgpu_enable_scheduler)
return 0;
}
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
error_unreserve:
amdgpu_bo_unreserve(bo);
error:
return r;
}
@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
* Add a mapping of the BO at the specified addr into the VM.
* Returns 0 for success, error for failure.
*
* Object has to be reserved and gets unreserved by this function!
* Object has to be reserved and unreserved by the caller!
*/
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
amdgpu_bo_unreserve(bo_va->bo);
size == 0 || size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
}
/* make sure object fit at this offset */
eaddr = saddr + size;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
amdgpu_bo_unreserve(bo_va->bo);
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
}
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
if (last_pfn > adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn);
amdgpu_bo_unreserve(bo_va->bo);
return -EINVAL;
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
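/* the interval tree can be modified concurrently, so walks and updates are protected by it_lock */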
spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
tmp->it.start, tmp->it.last + 1);
amdgpu_bo_unreserve(bo_va->bo);
r = -EINVAL;
goto error;
}
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
amdgpu_bo_unreserve(bo_va->bo);
r = -ENOMEM;
goto error;
}
@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
spin_lock(&vm->it_lock);
interval_tree_insert(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_map(bo_va, mapping);
/* Make sure the page tables are allocated */
@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (eaddr > vm->max_pde_used)
vm->max_pde_used = eaddr;
amdgpu_bo_unreserve(bo_va->bo);
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (vm->page_tables[pt_idx].bo)
continue;
ww_mutex_lock(&resv->lock, NULL);
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
NULL, resv, &pt);
ww_mutex_unlock(&resv->lock);
if (r)
goto error_free;
@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
error_free:
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);
@ -1119,7 +1110,7 @@ error:
* Remove a mapping of the BO at the specified addr from the VM.
* Returns 0 for success, error for failure.
*
* Object has to be reserved and gets unreserved by this function!
* Object has to be reserved and unreserved by the caller!
*/
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
break;
}
if (&mapping->list == &bo_va->invalids) {
amdgpu_bo_unreserve(bo_va->bo);
if (&mapping->list == &bo_va->invalids)
return -ENOENT;
}
}
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
list_add(&mapping->list, &vm->freed);
else
kfree(mapping);
amdgpu_bo_unreserve(bo_va->bo);
return 0;
}
@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
kfree(mapping);
}
@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].id = 0;
vm->ids[i].flushed_updates = NULL;
vm->ids[i].last_id_use = NULL;
}
mutex_init(&vm->mutex);
vm->va = RB_ROOT;
@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
spin_lock_init(&vm->it_lock);
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
NULL, NULL, &vm->page_directory);
if (r)
return r;
r = amdgpu_bo_reserve(vm->page_directory, false);
if (r) {
amdgpu_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
return r;
}
r = amdgpu_vm_clear_bo(adev, vm->page_directory);
amdgpu_bo_unreserve(vm->page_directory);
if (r) {
amdgpu_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
unsigned id = vm->ids[i].id;
atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
(long)vm, 0);
fence_put(vm->ids[i].flushed_updates);
fence_put(vm->ids[i].last_id_use);
}
mutex_destroy(&vm->mutex);
}
/**
* amdgpu_vm_manager_fini - cleanup VM manager
*
* @adev: amdgpu_device pointer
*
* Cleanup the VM manager and free resources.
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i;
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.ids[i].active);
}


@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
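/* THERM_INTH_MASK is an interrupt mask bit: setting it disables the high temperature interrupt */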
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
default:
@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
default:


@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
mmPCIE_INDEX, 0xffffffff, 0x0140001c,
mmPCIE_DATA, 0x000f0000, 0x00000000,
mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_texture_channel_caches = 16;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
}
case CHIP_FIJI:
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 1:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 2:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 3:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 4:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 6:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 7:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
break;
case 8:
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
break;
case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 10:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 11:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
break;
case 12:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
break;
case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 14:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 15:
gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 16:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
break;
case 17:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
break;
case 18:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 19:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 20:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 21:
gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 22:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 23:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 24:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 25:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 26:
gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
break;
case 27:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 28:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
break;
case 29:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
break;
case 30:
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
break;
default:
gb_tile_moden = 0;
break;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
}
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 1:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 2:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 3:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 4:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 5:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 6:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 8:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 9:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 10:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 11:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 12:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 13:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 14:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
break;
case 7:
/* unused idx */
continue;
default:
gb_tile_moden = 0;
break;
}
adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
}
break;
case CHIP_TONGA:
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x0000002A);
break;
case CHIP_FIJI:
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);


@ -40,7 +40,7 @@
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("radeon/boniare_mc.bin");
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
/**
@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
WREG32(mmVM_L2_CNTL, tmp);
tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
static int gmc_v7_0_sw_fini(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.active[i]);
amdgpu_vm_manager_fini(adev);
gmc_v7_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}
@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
static int gmc_v7_0_suspend(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.active[i]);
amdgpu_vm_manager_fini(adev);
gmc_v7_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}


@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
WREG32(mmVM_L2_CNTL, tmp);
tmp = RREG32(mmVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
static int gmc_v8_0_sw_fini(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.active[i]);
amdgpu_vm_manager_fini(adev);
gmc_v8_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}
@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
static int gmc_v8_0_suspend(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->vm_manager.enabled) {
for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.active[i]);
amdgpu_vm_manager_fini(adev);
gmc_v8_0_vm_fini(adev);
adev->vm_manager.enabled = false;
}


@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
TP_ARGS(sched_job),
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
__field(struct fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->sched_job = sched_job;
__entry->fence = &sched_job->s_fence->base;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->name, __entry->job_count,
__entry->hw_job_count)
TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->sched_job, __entry->fence, __entry->name,
__entry->job_count, __entry->hw_job_count)
);
TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
__field(struct fence *, fence)
),
TP_fast_assign(
__entry->fence = &fence->base;
),
TP_printk("fence=%p signaled", __entry->fence)
);
#endif
/* This part must be outside protection */


@ -34,6 +34,9 @@ static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
@ -273,22 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
*
* Returns 0 for success, negative error code otherwise.
*/
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
struct amd_sched_fence *fence = amd_sched_fence_create(
entity, sched_job->owner);
if (!fence)
return -ENOMEM;
fence_get(&fence->base);
sched_job->s_fence = fence;
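/* fence creation moved to the callers; pushing a job now only blocks until the entity queue has room */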
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
trace_amd_sched_job(sched_job);
return 0;
}
/**
@ -343,6 +337,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
list_del_init(&s_fence->list);
spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
trace_amd_sched_process_job(s_fence);
fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
@ -450,6 +445,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
atomic_set(&sched->hw_rq_count, 0);
if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
sched_fence_slab = kmem_cache_create(
"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!sched_fence_slab)
return -ENOMEM;
}
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@ -470,4 +472,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
if (atomic_dec_and_test(&sched_fence_slab_ref))
kmem_cache_destroy(sched_fence_slab);
}


@ -30,6 +30,9 @@
struct amd_gpu_scheduler;
struct amd_sched_rq;
extern struct kmem_cache *sched_fence_slab;
extern atomic_t sched_fence_slab_ref;
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
@ -76,7 +79,6 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
void *owner;
};
extern const struct fence_ops amd_sched_fence_ops;
@ -128,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);


@ -32,7 +32,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
struct amd_sched_fence *fence = NULL;
unsigned seq;
fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
@ -71,11 +71,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
return true;
}
static void amd_sched_fence_release(struct fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
kmem_cache_free(sched_fence_slab, fence);
}
const struct fence_ops amd_sched_fence_ops = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
.release = NULL,
.release = amd_sched_fence_release,
};


@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
if (!(rdev->flags & RADEON_IS_PCIE))
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
* See https://bugs.freedesktop.org/show_bug.cgi?id=91268
*/
if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
bo->flags &= ~RADEON_GEM_GTT_WC;
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
/* Don't try to enable write-combining when it can't work, or things
* may be slow
@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
bo->flags &= ~RADEON_GEM_GTT_WC;
if (bo->flags & RADEON_GEM_GTT_WC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#endif
radeon_ttm_placement_from_domain(bo, domain);


@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
ret = device_create_file(rdev->dev, &dev_attr_power_method);
if (ret)
DRM_ERROR("failed to create device file for power method\n");
if (!ret)
rdev->pm.sysfs_initialized = true;
rdev->pm.sysfs_initialized = true;
}
mutex_lock(&rdev->pm.mutex);


@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
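/* MSI's subsystem vendor id is 0x1462; the 0x1762 entry above was a typo */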
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};