diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index c59515ba7e69..add1737dae0d 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) else { child = list_entry(mm->unused_nodes.next, - struct drm_mm_node, free_stack); - list_del(&child->free_stack); + struct drm_mm_node, node_list); + list_del(&child->node_list); --mm->num_unused; } spin_unlock(&mm->unused_lock); @@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm) return ret; } ++mm->num_unused; - list_add_tail(&node->free_stack, &mm->unused_nodes); + list_add_tail(&node->node_list, &mm->unused_nodes); } spin_unlock(&mm->unused_lock); return 0; } EXPORT_SYMBOL(drm_mm_pre_get); -static int drm_mm_create_tail_node(struct drm_mm *mm, - unsigned long start, - unsigned long size, int atomic) +static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) { - struct drm_mm_node *child; - - child = drm_mm_kmalloc(mm, atomic); - if (unlikely(child == NULL)) - return -ENOMEM; - - child->free = 1; - child->size = size; - child->start = start; - child->mm = mm; - - list_add_tail(&child->node_list, &mm->node_list); - list_add_tail(&child->free_stack, &mm->free_stack); - - return 0; + return hole_node->start + hole_node->size; } -static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, - unsigned long size, - int atomic) +static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) { - struct drm_mm_node *child; + struct drm_mm_node *next_node = + list_entry(hole_node->node_list.next, struct drm_mm_node, + node_list); - child = drm_mm_kmalloc(parent->mm, atomic); - if (unlikely(child == NULL)) - return NULL; - - INIT_LIST_HEAD(&child->free_stack); - - child->size = size; - child->start = parent->start; - child->mm = parent->mm; - - list_add_tail(&child->node_list, &parent->node_list); - INIT_LIST_HEAD(&child->free_stack); - - parent->size -= size; - parent->start += size; - return child; + return next_node->start; } +static void drm_mm_insert_helper(struct drm_mm_node *hole_node, + struct drm_mm_node *node, + unsigned long size, unsigned alignment) +{ + struct drm_mm *mm = hole_node->mm; + unsigned long tmp = 0, wasted = 0; + unsigned long hole_start = drm_mm_hole_node_start(hole_node); + unsigned long hole_end = drm_mm_hole_node_end(hole_node); -struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, + BUG_ON(!hole_node->hole_follows || node->allocated); + + if (alignment) + tmp = hole_start % alignment; + + if (!tmp) { + hole_node->hole_follows = 0; + list_del_init(&hole_node->hole_stack); + } else + wasted = alignment - tmp; + + node->start = hole_start + wasted; + node->size = size; + node->mm = mm; + node->allocated = 1; + + INIT_LIST_HEAD(&node->hole_stack); + list_add(&node->node_list, &hole_node->node_list); + + BUG_ON(node->start + node->size > hole_end); + + if (node->start + node->size < hole_end) { + list_add(&node->hole_stack, &mm->hole_stack); + node->hole_follows = 1; + } else { + node->hole_follows = 0; + } +} + +struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, unsigned long size, unsigned alignment, int atomic) { + struct drm_mm_node *node; - struct drm_mm_node *align_splitoff = NULL; - unsigned tmp = 0; + node = drm_mm_kmalloc(hole_node->mm, atomic); + if (unlikely(node == NULL)) + return NULL; - if (alignment) - tmp = node->start % alignment; - - if (tmp) { - align_splitoff = - drm_mm_split_at_start(node, 
alignment - tmp, atomic); - if (unlikely(align_splitoff == NULL)) - return NULL; - } - - if (node->size == size) { - list_del_init(&node->free_stack); - node->free = 0; - } else { - node = drm_mm_split_at_start(node, size, atomic); - } - - if (align_splitoff) - drm_mm_put_block(align_splitoff); + drm_mm_insert_helper(hole_node, node, size, alignment); return node; } EXPORT_SYMBOL(drm_mm_get_block_generic); -struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, +/** + * Search for free space and insert a preallocated memory node. Returns + * -ENOSPC if no suitable free area is available. The preallocated memory node + * must be cleared. + */ +int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, + unsigned long size, unsigned alignment) +{ + struct drm_mm_node *hole_node; + + hole_node = drm_mm_search_free(mm, size, alignment, 0); + if (!hole_node) + return -ENOSPC; + + drm_mm_insert_helper(hole_node, node, size, alignment); + + return 0; +} +EXPORT_SYMBOL(drm_mm_insert_node); + +static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, + struct drm_mm_node *node, + unsigned long size, unsigned alignment, + unsigned long start, unsigned long end) +{ + struct drm_mm *mm = hole_node->mm; + unsigned long tmp = 0, wasted = 0; + unsigned long hole_start = drm_mm_hole_node_start(hole_node); + unsigned long hole_end = drm_mm_hole_node_end(hole_node); + + BUG_ON(!hole_node->hole_follows || node->allocated); + + if (hole_start < start) + wasted += start - hole_start; + if (alignment) + tmp = (hole_start + wasted) % alignment; + + if (tmp) + wasted += alignment - tmp; + + if (!wasted) { + hole_node->hole_follows = 0; + list_del_init(&hole_node->hole_stack); + } + + node->start = hole_start + wasted; + node->size = size; + node->mm = mm; + node->allocated = 1; + + INIT_LIST_HEAD(&node->hole_stack); + list_add(&node->node_list, &hole_node->node_list); + + BUG_ON(node->start + node->size > hole_end); + BUG_ON(node->start + node->size > end); + + if (node->start + node->size < hole_end) { + list_add(&node->hole_stack, &mm->hole_stack); + node->hole_follows = 1; + } else { + node->hole_follows = 0; + } +} + +struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node, unsigned long size, unsigned alignment, unsigned long start, unsigned long end, int atomic) { - struct drm_mm_node *align_splitoff = NULL; - unsigned tmp = 0; - unsigned wasted = 0; + struct drm_mm_node *node; - if (node->start < start) - wasted += start - node->start; - if (alignment) - tmp = ((node->start + wasted) % alignment); + node = drm_mm_kmalloc(hole_node->mm, atomic); + if (unlikely(node == NULL)) + return NULL; - if (tmp) - wasted += alignment - tmp; - if (wasted) { - align_splitoff = drm_mm_split_at_start(node, wasted, atomic); - if (unlikely(align_splitoff == NULL)) - return NULL; - } - - if (node->size == size) { - list_del_init(&node->free_stack); - node->free = 0; - } else { - node = drm_mm_split_at_start(node, size, atomic); - } - - if (align_splitoff) - drm_mm_put_block(align_splitoff); + drm_mm_insert_helper_range(hole_node, node, size, alignment, + start, end); return node; } EXPORT_SYMBOL(drm_mm_get_block_range_generic); -/* - * Put a block. Merge with the previous and / or next block if they are free. - * Otherwise add to the free stack. +/** + * Search for free space and insert a preallocated memory node. Returns + * -ENOSPC if no suitable free area is available. This is for range + * restricted allocations. 
The preallocated memory node must be cleared. */ +int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, + unsigned long size, unsigned alignment, + unsigned long start, unsigned long end) +{ + struct drm_mm_node *hole_node; -void drm_mm_put_block(struct drm_mm_node *cur) + hole_node = drm_mm_search_free_in_range(mm, size, alignment, + start, end, 0); + if (!hole_node) + return -ENOSPC; + + drm_mm_insert_helper_range(hole_node, node, size, alignment, + start, end); + + return 0; +} +EXPORT_SYMBOL(drm_mm_insert_node_in_range); + +/** + * Remove a memory node from the allocator. + */ +void drm_mm_remove_node(struct drm_mm_node *node) +{ + struct drm_mm *mm = node->mm; + struct drm_mm_node *prev_node; + + BUG_ON(node->scanned_block || node->scanned_prev_free + || node->scanned_next_free); + + prev_node = + list_entry(node->node_list.prev, struct drm_mm_node, node_list); + + if (node->hole_follows) { + BUG_ON(drm_mm_hole_node_start(node) + == drm_mm_hole_node_end(node)); + list_del(&node->hole_stack); + } else + BUG_ON(drm_mm_hole_node_start(node) + != drm_mm_hole_node_end(node)); + + if (!prev_node->hole_follows) { + prev_node->hole_follows = 1; + list_add(&prev_node->hole_stack, &mm->hole_stack); + } else + list_move(&prev_node->hole_stack, &mm->hole_stack); + + list_del(&node->node_list); + node->allocated = 0; +} +EXPORT_SYMBOL(drm_mm_remove_node); + +/* + * Remove a memory node from the allocator and free the allocated struct + * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the + * drm_mm_get_block functions. + */ +void drm_mm_put_block(struct drm_mm_node *node) { - struct drm_mm *mm = cur->mm; - struct list_head *cur_head = &cur->node_list; - struct list_head *root_head = &mm->node_list; - struct drm_mm_node *prev_node = NULL; - struct drm_mm_node *next_node; + struct drm_mm *mm = node->mm; - int merged = 0; + drm_mm_remove_node(node); - BUG_ON(cur->scanned_block || cur->scanned_prev_free - || cur->scanned_next_free); - - if (cur_head->prev != root_head) { - prev_node = - list_entry(cur_head->prev, struct drm_mm_node, node_list); - if (prev_node->free) { - prev_node->size += cur->size; - merged = 1; - } - } - if (cur_head->next != root_head) { - next_node = - list_entry(cur_head->next, struct drm_mm_node, node_list); - if (next_node->free) { - if (merged) { - prev_node->size += next_node->size; - list_del(&next_node->node_list); - list_del(&next_node->free_stack); - spin_lock(&mm->unused_lock); - if (mm->num_unused < MM_UNUSED_TARGET) { - list_add(&next_node->free_stack, - &mm->unused_nodes); - ++mm->num_unused; - } else - kfree(next_node); - spin_unlock(&mm->unused_lock); - } else { - next_node->size += cur->size; - next_node->start = cur->start; - merged = 1; - } - } - } - if (!merged) { - cur->free = 1; - list_add(&cur->free_stack, &mm->free_stack); - } else { - list_del(&cur->node_list); - spin_lock(&mm->unused_lock); - if (mm->num_unused < MM_UNUSED_TARGET) { - list_add(&cur->free_stack, &mm->unused_nodes); - ++mm->num_unused; - } else - kfree(cur); - spin_unlock(&mm->unused_lock); - } + spin_lock(&mm->unused_lock); + if (mm->num_unused < MM_UNUSED_TARGET) { + list_add(&node->node_list, &mm->unused_nodes); + ++mm->num_unused; + } else + kfree(node); + spin_unlock(&mm->unused_lock); } - EXPORT_SYMBOL(drm_mm_put_block); static int check_free_hole(unsigned long start, unsigned long end, @@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, best = NULL; best_size = ~0UL; - list_for_each_entry(entry, 
&mm->free_stack, free_stack) { - if (!check_free_hole(entry->start, entry->start + entry->size, + list_for_each_entry(entry, &mm->hole_stack, hole_stack) { + BUG_ON(!entry->hole_follows); + if (!check_free_hole(drm_mm_hole_node_start(entry), + drm_mm_hole_node_end(entry), size, alignment)) continue; @@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, best = NULL; best_size = ~0UL; - list_for_each_entry(entry, &mm->free_stack, free_stack) { - unsigned long adj_start = entry->start < start ? - start : entry->start; - unsigned long adj_end = entry->start + entry->size > end ? - end : entry->start + entry->size; + list_for_each_entry(entry, &mm->hole_stack, hole_stack) { + unsigned long adj_start = drm_mm_hole_node_start(entry) < start ? + start : drm_mm_hole_node_start(entry); + unsigned long adj_end = drm_mm_hole_node_end(entry) > end ? + end : drm_mm_hole_node_end(entry); + BUG_ON(!entry->hole_follows); if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; @@ -375,6 +425,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, } EXPORT_SYMBOL(drm_mm_search_free_in_range); +/** + * Moves an allocation. To be used with embedded struct drm_mm_node. + */ +void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) +{ + list_replace(&old->node_list, &new->node_list); + list_replace(&old->hole_stack, &new->hole_stack); + new->hole_follows = old->hole_follows; + new->mm = old->mm; + new->start = old->start; + new->size = old->size; + + old->allocated = 0; + new->allocated = 1; +} +EXPORT_SYMBOL(drm_mm_replace_node); + /** * Initialize lru scanning. * @@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, mm->scan_hit_start = 0; mm->scan_hit_size = 0; mm->scan_check_range = 0; + mm->prev_scanned_node = NULL; } EXPORT_SYMBOL(drm_mm_init_scan); @@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, mm->scan_start = start; mm->scan_end = end; mm->scan_check_range = 1; + mm->prev_scanned_node = NULL; } EXPORT_SYMBOL(drm_mm_init_scan_with_range); @@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range); int drm_mm_scan_add_block(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; - struct list_head *prev_free, *next_free; - struct drm_mm_node *prev_node, *next_node; + struct drm_mm_node *prev_node; + unsigned long hole_start, hole_end; unsigned long adj_start; unsigned long adj_end; mm->scanned_blocks++; - prev_free = next_free = NULL; - - BUG_ON(node->free); + BUG_ON(node->scanned_block); node->scanned_block = 1; - node->free = 1; - if (node->node_list.prev != &mm->node_list) { - prev_node = list_entry(node->node_list.prev, struct drm_mm_node, - node_list); + prev_node = list_entry(node->node_list.prev, struct drm_mm_node, + node_list); - if (prev_node->free) { - list_del(&prev_node->node_list); - - node->start = prev_node->start; - node->size += prev_node->size; - - prev_node->scanned_prev_free = 1; - - prev_free = &prev_node->free_stack; - } - } - - if (node->node_list.next != &mm->node_list) { - next_node = list_entry(node->node_list.next, struct drm_mm_node, - node_list); - - if (next_node->free) { - list_del(&next_node->node_list); - - node->size += next_node->size; - - next_node->scanned_next_free = 1; - - next_free = &next_node->free_stack; - } - } - - /* The free_stack list is not used for allocated objects, so these two - * pointers can be abused (as long as no allocations in this memory - * manager happens. 
*/ - node->free_stack.prev = prev_free; - node->free_stack.next = next_free; + node->scanned_preceeds_hole = prev_node->hole_follows; + prev_node->hole_follows = 1; + list_del(&node->node_list); + node->node_list.prev = &prev_node->node_list; + node->node_list.next = &mm->prev_scanned_node->node_list; + mm->prev_scanned_node = node; + hole_start = drm_mm_hole_node_start(prev_node); + hole_end = drm_mm_hole_node_end(prev_node); if (mm->scan_check_range) { - adj_start = node->start < mm->scan_start ? - mm->scan_start : node->start; - adj_end = node->start + node->size > mm->scan_end ? - mm->scan_end : node->start + node->size; + adj_start = hole_start < mm->scan_start ? + mm->scan_start : hole_start; + adj_end = hole_end > mm->scan_end ? + mm->scan_end : hole_end; } else { - adj_start = node->start; - adj_end = node->start + node->size; + adj_start = hole_start; + adj_end = hole_end; } if (check_free_hole(adj_start , adj_end, mm->scan_size, mm->scan_alignment)) { - mm->scan_hit_start = node->start; - mm->scan_hit_size = node->size; + mm->scan_hit_start = hole_start; + mm->scan_hit_size = hole_end; return 1; } @@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block); int drm_mm_scan_remove_block(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; - struct drm_mm_node *prev_node, *next_node; + struct drm_mm_node *prev_node; mm->scanned_blocks--; BUG_ON(!node->scanned_block); node->scanned_block = 0; - node->free = 0; - prev_node = list_entry(node->free_stack.prev, struct drm_mm_node, - free_stack); - next_node = list_entry(node->free_stack.next, struct drm_mm_node, - free_stack); + prev_node = list_entry(node->node_list.prev, struct drm_mm_node, + node_list); - if (prev_node) { - BUG_ON(!prev_node->scanned_prev_free); - prev_node->scanned_prev_free = 0; - - list_add_tail(&prev_node->node_list, &node->node_list); - - node->start = prev_node->start + prev_node->size; - node->size -= prev_node->size; - } - - if (next_node) { - BUG_ON(!next_node->scanned_next_free); - next_node->scanned_next_free = 0; - - list_add(&next_node->node_list, &node->node_list); - - node->size -= next_node->size; - } - - INIT_LIST_HEAD(&node->free_stack); + prev_node->hole_follows = node->scanned_preceeds_hole; + INIT_LIST_HEAD(&node->node_list); + list_add(&node->node_list, &prev_node->node_list); /* Only need to check for containement because start&size for the * complete resulting free block (not just the desired part) is @@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block); int drm_mm_clean(struct drm_mm * mm) { - struct list_head *head = &mm->node_list; + struct list_head *head = &mm->head_node.node_list; return (head->next->next == head); } @@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean); int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { - INIT_LIST_HEAD(&mm->node_list); - INIT_LIST_HEAD(&mm->free_stack); + INIT_LIST_HEAD(&mm->hole_stack); INIT_LIST_HEAD(&mm->unused_nodes); mm->num_unused = 0; mm->scanned_blocks = 0; spin_lock_init(&mm->unused_lock); - return drm_mm_create_tail_node(mm, start, size, 0); + /* Clever trick to avoid a special case in the free hole tracking. 
*/ + INIT_LIST_HEAD(&mm->head_node.node_list); + INIT_LIST_HEAD(&mm->head_node.hole_stack); + mm->head_node.hole_follows = 1; + mm->head_node.scanned_block = 0; + mm->head_node.scanned_prev_free = 0; + mm->head_node.scanned_next_free = 0; + mm->head_node.mm = mm; + mm->head_node.start = start + size; + mm->head_node.size = start - mm->head_node.start; + list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); + + return 0; } EXPORT_SYMBOL(drm_mm_init); void drm_mm_takedown(struct drm_mm * mm) { - struct list_head *bnode = mm->free_stack.next; - struct drm_mm_node *entry; - struct drm_mm_node *next; + struct drm_mm_node *entry, *next; - entry = list_entry(bnode, struct drm_mm_node, free_stack); - - if (entry->node_list.next != &mm->node_list || - entry->free_stack.next != &mm->free_stack) { + if (!list_empty(&mm->head_node.node_list)) { DRM_ERROR("Memory manager not clean. Delaying takedown\n"); return; } - list_del(&entry->free_stack); - list_del(&entry->node_list); - kfree(entry); - spin_lock(&mm->unused_lock); - list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) { - list_del(&entry->free_stack); + list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) { + list_del(&entry->node_list); kfree(entry); --mm->num_unused; } @@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown); void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) { struct drm_mm_node *entry; - int total_used = 0, total_free = 0, total = 0; + unsigned long total_used = 0, total_free = 0, total = 0; + unsigned long hole_start, hole_end, hole_size; - list_for_each_entry(entry, &mm->node_list, node_list) { - printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n", + hole_start = drm_mm_hole_node_start(&mm->head_node); + hole_end = drm_mm_hole_node_end(&mm->head_node); + hole_size = hole_end - hole_start; + if (hole_size) + printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", + prefix, hole_start, hole_end, + hole_size); + total_free += hole_size; + + drm_mm_for_each_node(entry, mm) { + printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", prefix, entry->start, entry->start + entry->size, - entry->size, entry->free ? "free" : "used"); - total += entry->size; - if (entry->free) - total_free += entry->size; - else - total_used += entry->size; + entry->size); + total_used += entry->size; + + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); + hole_size = hole_end - hole_start; + printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", + prefix, hole_start, hole_end, + hole_size); + total_free += hole_size; + } } - printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total, + total = total_free + total_used; + + printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, total_used, total_free); } EXPORT_SYMBOL(drm_mm_debug_table); @@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table); int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) { struct drm_mm_node *entry; - int total_used = 0, total_free = 0, total = 0; + unsigned long total_used = 0, total_free = 0, total = 0; + unsigned long hole_start, hole_end, hole_size; - list_for_each_entry(entry, &mm->node_list, node_list) { - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? 
"free" : "used"); - total += entry->size; - if (entry->free) - total_free += entry->size; - else - total_used += entry->size; + hole_start = drm_mm_hole_node_start(&mm->head_node); + hole_end = drm_mm_hole_node_end(&mm->head_node); + hole_size = hole_end - hole_start; + if (hole_size) + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", + hole_start, hole_end, hole_size); + total_free += hole_size; + + drm_mm_for_each_node(entry, mm) { + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", + entry->start, entry->start + entry->size, + entry->size); + total_used += entry->size; + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(&mm->head_node); + hole_end = drm_mm_hole_node_end(&mm->head_node); + hole_size = hole_end - hole_start; + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", + hole_start, hole_end, hole_size); + total_free += hole_size; + } } - seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free); + total = total_free + total_used; + + seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); return 0; } EXPORT_SYMBOL(drm_mm_dump_table); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 30b6544467ca..03adfe4c7665 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -909,7 +909,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); - if (chan->ramin_heap.free_stack.next) + if (drm_mm_initialized(&chan->ramin_heap)) drm_mm_takedown(&chan->ramin_heap); nouveau_gpuobj_ref(NULL, &chan->ramin); } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..300285ae8e9e 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan) nouveau_gpuobj_ref(NULL, &chan->ramfc); nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); - if (chan->ramin_heap.free_stack.next) + if (drm_mm_initialized(&chan->ramin_heap)) drm_mm_takedown(&chan->ramin_heap); nouveau_gpuobj_ref(NULL, &chan->ramin); kfree(chan); @@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev) nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); - if (dev_priv->ramin_heap.free_stack.next) + if (drm_mm_initialized(&dev_priv->ramin_heap)) drm_mm_takedown(&dev_priv->ramin_heap); dev_priv->engine.instmem.priv = NULL; diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index c09091749054..82357d2df1f4 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan) return; nouveau_vm_ref(NULL, &chan->vm, NULL); - if (chan->ramin_heap.free_stack.next) + if (drm_mm_initialized(&chan->ramin_heap)) drm_mm_takedown(&chan->ramin_heap); nouveau_gpuobj_ref(NULL, &chan->ramin); kfree(chan); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b1537000a104..d56f08d3cbdc 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1030,7 +1030,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, * just update base pointers */ obj = radeon_fb->obj; - rbo = obj->driver_private; + rbo = gem_to_radeon_bo(obj); r = 
radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; @@ -1145,7 +1145,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); - rbo = radeon_fb->obj->driver_private; + rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; @@ -1191,7 +1191,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, } obj = radeon_fb->obj; - rbo = obj->driver_private; + rbo = gem_to_radeon_bo(obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; @@ -1308,7 +1308,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); - rbo = radeon_fb->obj->driver_private; + rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index a1ba4b3053d0..2ed930e02f3a 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -572,7 +572,7 @@ int evergreen_blit_init(struct radeon_device *rdev) obj_size += evergreen_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("evergreen failed to allocate shader\n"); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 650672a0f5ad..be780a6b9b1d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2728,7 +2728,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev) /* Allocate ring buffer */ if (rdev->ih.ring_obj == NULL) { - r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, + r = radeon_bo_create(rdev, rdev->ih.ring_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->ih.ring_obj); diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index b5443fe1c1d1..846fae576399 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -26,6 +26,7 @@ #include "drmP.h" #include "radeon.h" #include "radeon_reg.h" +#include "radeon_asic.h" #include "atom.h" #define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */ diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 86e5aa07f0db..16e211a614d7 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev) obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e6a58ed48dcf..50db6d62eec2 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -26,6 +26,7 @@ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" +#include "radeon_asic.h" #include "atom.h" /* diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 82aa59941aa1..55fefe763965 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ 
b/drivers/gpu/drm/radeon/radeon.h @@ -258,8 +258,9 @@ struct radeon_bo { int surface_reg; /* Constant after initialization */ struct radeon_device *rdev; - struct drm_gem_object *gobj; + struct drm_gem_object gem_base; }; +#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) struct radeon_bo_list { struct ttm_validate_buffer tv; @@ -1197,19 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev, void radeon_device_fini(struct radeon_device *rdev); int radeon_gpu_wait_for_idle(struct radeon_device *rdev); -/* r600 blit */ -int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); -void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); -void r600_kms_blit_copy(struct radeon_device *rdev, - u64 src_gpu_addr, u64 dst_gpu_addr, - int size_bytes); -/* evergreen blit */ -int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); -void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); -void evergreen_kms_blit_copy(struct radeon_device *rdev, - u64 src_gpu_addr, u64 dst_gpu_addr, - int size_bytes); - static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) { if (reg < rdev->rmmio_size) @@ -1460,59 +1448,12 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc extern int radeon_resume_kms(struct drm_device *dev); extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); -/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ -extern bool r600_card_posted(struct radeon_device *rdev); -extern void r600_cp_stop(struct radeon_device *rdev); -extern int r600_cp_start(struct radeon_device *rdev); -extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); -extern int r600_cp_resume(struct radeon_device *rdev); -extern void r600_cp_fini(struct radeon_device *rdev); -extern int r600_count_pipe_bits(uint32_t val); -extern int r600_mc_wait_for_idle(struct radeon_device *rdev); -extern int r600_pcie_gart_init(struct radeon_device *rdev); -extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); -extern int r600_ib_test(struct radeon_device *rdev); -extern int r600_ring_test(struct radeon_device *rdev); -extern void r600_scratch_init(struct radeon_device *rdev); -extern int r600_blit_init(struct radeon_device *rdev); -extern void r600_blit_fini(struct radeon_device *rdev); -extern int r600_init_microcode(struct radeon_device *rdev); -extern int r600_asic_reset(struct radeon_device *rdev); -/* r600 irq */ -extern int r600_irq_init(struct radeon_device *rdev); -extern void r600_irq_fini(struct radeon_device *rdev); -extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); -extern int r600_irq_set(struct radeon_device *rdev); -extern void r600_irq_suspend(struct radeon_device *rdev); -extern void r600_disable_interrupts(struct radeon_device *rdev); -extern void r600_rlc_stop(struct radeon_device *rdev); -/* r600 audio */ -extern int r600_audio_init(struct radeon_device *rdev); -extern int r600_audio_tmds_index(struct drm_encoder *encoder); -extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); -extern int r600_audio_channels(struct radeon_device *rdev); -extern int r600_audio_bits_per_sample(struct radeon_device *rdev); -extern int r600_audio_rate(struct radeon_device *rdev); -extern uint8_t r600_audio_status_bits(struct radeon_device *rdev); -extern uint8_t r600_audio_category_code(struct radeon_device *rdev); -extern void 
r600_audio_schedule_polling(struct radeon_device *rdev); -extern void r600_audio_enable_polling(struct drm_encoder *encoder); -extern void r600_audio_disable_polling(struct drm_encoder *encoder); -extern void r600_audio_fini(struct radeon_device *rdev); -extern void r600_hdmi_init(struct drm_encoder *encoder); +/* + * r600 functions used by radeon_encoder.c + */ extern void r600_hdmi_enable(struct drm_encoder *encoder); extern void r600_hdmi_disable(struct drm_encoder *encoder); extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); -extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); -extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); - -extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); -extern void r700_cp_stop(struct radeon_device *rdev); -extern void r700_cp_fini(struct radeon_device *rdev); -extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); -extern int evergreen_irq_set(struct radeon_device *rdev); -extern int evergreen_blit_init(struct radeon_device *rdev); -extern void evergreen_blit_fini(struct radeon_device *rdev); extern int ni_init_microcode(struct radeon_device *rdev); extern int btc_mc_load_microcode(struct radeon_device *rdev); @@ -1524,14 +1465,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev); static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } #endif -/* evergreen */ -struct evergreen_mc_save { - u32 vga_control[6]; - u32 vga_render_control; - u32 vga_hdp_control; - u32 crtc_control[6]; -}; - #include "radeon_object.h" #endif diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c59bd98a2029..1c7317e3aa8c 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev); void r100_fini(struct radeon_device *rdev); int r100_suspend(struct radeon_device *rdev); int r100_resume(struct radeon_device *rdev); -uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); -void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void r100_vga_set_state(struct radeon_device *rdev, bool state); bool r100_gpu_is_lockup(struct radeon_device *rdev); int r100_asic_reset(struct radeon_device *rdev); @@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); -extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); -extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev); void rs400_gart_disable(struct radeon_device *rdev); void rs400_gart_fini(struct radeon_device *rdev); - /* * rs600. 
*/ @@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_ring_start(struct radeon_device *rdev); -uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); -void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_bandwidth_update(struct radeon_device *rdev); int rv515_resume(struct radeon_device *rdev); int rv515_suspend(struct radeon_device *rdev); @@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int r600_cs_parse(struct radeon_cs_parser *p); void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -int r600_irq_process(struct radeon_device *rdev); -int r600_irq_set(struct radeon_device *rdev); bool r600_gpu_is_lockup(struct radeon_device *rdev); int r600_asic_reset(struct radeon_device *rdev); int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); void r600_clear_surface_reg(struct radeon_device *rdev, int reg); +int r600_ib_test(struct radeon_device *rdev); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev); int r600_copy_blit(struct radeon_device *rdev, @@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev); extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int r600_get_pcie_lanes(struct radeon_device *rdev); +bool r600_card_posted(struct radeon_device *rdev); +void r600_cp_stop(struct radeon_device *rdev); +int r600_cp_start(struct radeon_device *rdev); +void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); +int r600_cp_resume(struct radeon_device *rdev); +void r600_cp_fini(struct radeon_device *rdev); +int r600_count_pipe_bits(uint32_t val); +int r600_mc_wait_for_idle(struct radeon_device *rdev); +int r600_pcie_gart_init(struct radeon_device *rdev); +void r600_scratch_init(struct radeon_device *rdev); +int r600_blit_init(struct radeon_device *rdev); +void r600_blit_fini(struct radeon_device *rdev); +int r600_init_microcode(struct radeon_device *rdev); +/* r600 irq */ +int r600_irq_process(struct radeon_device *rdev); +int r600_irq_init(struct radeon_device *rdev); +void r600_irq_fini(struct radeon_device *rdev); +void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); +int r600_irq_set(struct radeon_device *rdev); +void r600_irq_suspend(struct radeon_device *rdev); +void r600_disable_interrupts(struct radeon_device *rdev); +void r600_rlc_stop(struct radeon_device *rdev); +/* r600 audio */ +int r600_audio_init(struct radeon_device *rdev); +int r600_audio_tmds_index(struct drm_encoder *encoder); +void r600_audio_set_clock(struct drm_encoder *encoder, int clock); +int r600_audio_channels(struct radeon_device *rdev); +int r600_audio_bits_per_sample(struct radeon_device *rdev); +int r600_audio_rate(struct radeon_device *rdev); +uint8_t r600_audio_status_bits(struct radeon_device *rdev); +uint8_t r600_audio_category_code(struct radeon_device *rdev); +void r600_audio_schedule_polling(struct radeon_device *rdev); +void r600_audio_enable_polling(struct drm_encoder *encoder); +void r600_audio_disable_polling(struct drm_encoder *encoder); +void r600_audio_fini(struct radeon_device *rdev); +void r600_hdmi_init(struct 
drm_encoder *encoder); +int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); +void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); +/* r600 blit */ +int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); +void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); +void r600_kms_blit_copy(struct radeon_device *rdev, + u64 src_gpu_addr, u64 dst_gpu_addr, + int size_bytes); /* * rv770,rv730,rv710,rv740 @@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev); int rv770_suspend(struct radeon_device *rdev); int rv770_resume(struct radeon_device *rdev); -extern void rv770_pm_misc(struct radeon_device *rdev); -extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +void rv770_pm_misc(struct radeon_device *rdev); +u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); +void r700_cp_stop(struct radeon_device *rdev); +void r700_cp_fini(struct radeon_device *rdev); /* * evergreen */ +struct evergreen_mc_save { + u32 vga_control[6]; + u32 vga_render_control; + u32 vga_hdp_control; + u32 crtc_control[6]; +}; void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); @@ -374,5 +419,15 @@ extern void evergreen_pm_finish(struct radeon_device *rdev); extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); +void evergreen_disable_interrupt_state(struct radeon_device *rdev); +int evergreen_blit_init(struct radeon_device *rdev); +void evergreen_blit_fini(struct radeon_device *rdev); +/* evergreen blit */ +int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); +void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); +void evergreen_kms_blit_copy(struct radeon_device *rdev, + u64 src_gpu_addr, u64 dst_gpu_addr, + int size_bytes); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c558685cc637..10191d9372d8 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, size = bsize; n = 1024; - r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); if (r) { goto out_cleanup; } @@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 35b5eb8fbe2a..8c1916941871 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) return -ENOENT; } p->relocs_ptr[i] = &p->relocs[i]; - p->relocs[i].robj = p->relocs[i].gobj->driver_private; + p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); p->relocs[i].lobj.bo = p->relocs[i].robj; p->relocs[i].lobj.wdomain 
= r->write_domain; p->relocs[i].lobj.rdomain = r->read_domains; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0d478932b1a9..7c0a3f26ab5e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -184,7 +184,7 @@ int radeon_wb_init(struct radeon_device *rdev) int r; if (rdev->wb.wb_obj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, + r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); if (r) { dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); @@ -860,7 +860,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) if (rfb == NULL || rfb->obj == NULL) { continue; } - robj = rfb->obj->driver_private; + robj = gem_to_radeon_bo(rfb->obj); /* don't unpin kernel fb objects */ if (!radeon_fbdev_robj_is_fb(rdev, robj)) { r = radeon_bo_reserve(robj, false); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 2eff98cfd728..4409975a363c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, new_radeon_fb = to_radeon_framebuffer(fb); /* schedule unpin of the old buffer */ obj = old_radeon_fb->obj; - rbo = obj->driver_private; + rbo = gem_to_radeon_bo(obj); work->old_rbo = rbo; INIT_WORK(&work->work, radeon_unpin_work_func); @@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, /* pin the new buffer */ obj = new_radeon_fb->obj; - rbo = obj->driver_private; + rbo = gem_to_radeon_bo(obj); DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", work->old_rbo, rbo); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index cb968f997ce7..28431e78ab56 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -90,7 +90,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) { - struct radeon_bo *rbo = gobj->driver_private; + struct radeon_bo *rbo = gem_to_radeon_bo(gobj); int ret; ret = radeon_bo_reserve(rbo, false); @@ -128,7 +128,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, aligned_size); return -ENOMEM; } - rbo = gobj->driver_private; + rbo = gem_to_radeon_bo(gobj); if (fb_tiled) tiling_flags = RADEON_TILING_MACRO; @@ -202,7 +202,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, mode_cmd.depth = sizes->surface_depth; ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); - rbo = gobj->driver_private; + rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = framebuffer_alloc(0, device); @@ -403,14 +403,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev) struct radeon_bo *robj; int size = 0; - robj = rdev->mode_info.rfbdev->rfb.obj->driver_private; + robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); size += radeon_bo_size(robj); return size; } bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { - if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private) + if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) return true; return false; } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a6b0fed7bae9..f0534ef2f331 100644 --- 
a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) int r; if (rdev->gart.table.vram.robj == NULL) { - r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, + r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->gart.table.vram.robj); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index ede5dccdf79f..a419b67d8401 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -32,21 +32,18 @@ int radeon_gem_object_init(struct drm_gem_object *obj) { - /* we do nothings here */ + BUG(); + return 0; } void radeon_gem_object_free(struct drm_gem_object *gobj) { - struct radeon_bo *robj = gobj->driver_private; + struct radeon_bo *robj = gem_to_radeon_bo(gobj); - gobj->driver_private = NULL; if (robj) { radeon_bo_unref(&robj); } - - drm_gem_object_release(gobj); - kfree(gobj); } int radeon_gem_object_create(struct radeon_device *rdev, int size, @@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, bool discardable, bool kernel, struct drm_gem_object **obj) { - struct drm_gem_object *gobj; struct radeon_bo *robj; int r; *obj = NULL; - gobj = drm_gem_object_alloc(rdev->ddev, size); - if (!gobj) { - return -ENOMEM; - } /* At least align on page size */ if (alignment < PAGE_SIZE) { alignment = PAGE_SIZE; } - r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj); + r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj); if (r) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", size, initial_domain, alignment, r); - drm_gem_object_unreference_unlocked(gobj); return r; } - gobj->driver_private = robj; - *obj = gobj; + *obj = &robj->gem_base; + + mutex_lock(&rdev->gem.mutex); + list_add_tail(&robj->list, &rdev->gem.objects); + mutex_unlock(&rdev->gem.mutex); + return 0; } int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, uint64_t *gpu_addr) { - struct radeon_bo *robj = obj->driver_private; + struct radeon_bo *robj = gem_to_radeon_bo(obj); int r; r = radeon_bo_reserve(robj, false); @@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, void radeon_gem_object_unpin(struct drm_gem_object *obj) { - struct radeon_bo *robj = obj->driver_private; + struct radeon_bo *robj = gem_to_radeon_bo(obj); int r; r = radeon_bo_reserve(robj, false); @@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, int r; /* FIXME: reeimplement */ - robj = gobj->driver_private; + robj = gem_to_radeon_bo(gobj); /* work out where to validate the buffer to */ domain = wdomain; if (!domain) { @@ -228,7 +223,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (gobj == NULL) { return -ENOENT; } - robj = gobj->driver_private; + robj = gem_to_radeon_bo(gobj); r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); @@ -247,7 +242,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, if (gobj == NULL) { return -ENOENT; } - robj = gobj->driver_private; + robj = gem_to_radeon_bo(gobj); *offset_p = radeon_bo_mmap_offset(robj); drm_gem_object_unreference_unlocked(gobj); return 0; @@ -274,7 +269,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, if (gobj == NULL) { return -ENOENT; } - robj = gobj->driver_private; + robj = gem_to_radeon_bo(gobj); r = 
radeon_bo_wait(robj, &cur_placement, true); switch (cur_placement) { case TTM_PL_VRAM: @@ -304,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, if (gobj == NULL) { return -ENOENT; } - robj = gobj->driver_private; + robj = gem_to_radeon_bo(gobj); r = radeon_bo_wait(robj, NULL, false); /* callback hw specific functions if any */ if (robj->rdev->asic->ioctl_wait_idle) @@ -325,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; - robj = gobj->driver_private; + robj = gem_to_radeon_bo(gobj); r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); drm_gem_object_unreference_unlocked(gobj); return r; @@ -343,7 +338,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; - rbo = gobj->driver_private; + rbo = gem_to_radeon_bo(gobj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index cf0638c3b7c7..9ae599eb2e6d 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; - rbo = obj->driver_private; + rbo = gem_to_radeon_bo(obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; @@ -520,7 +520,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); - rbo = radeon_fb->obj->driver_private; + rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 7d6b8e88f746..8758d02cca1a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_clear_surface_reg(bo); + drm_gem_object_release(&bo->gem_base); kfree(bo); } @@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->placement.num_busy_placement = c; } -int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, +int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, struct radeon_bo **bo_ptr) { @@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, unsigned long max_size = 0; int r; + size = ALIGN(size, PAGE_SIZE); + if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; } @@ -118,8 +121,13 @@ retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; + r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size); + if (unlikely(r)) { + kfree(bo); + return r; + } bo->rdev = rdev; - bo->gobj = gobj; + bo->gem_base.driver_private = NULL; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); radeon_ttm_placement_from_domain(bo, domain); @@ -142,12 +150,9 @@ retry: return r; } *bo_ptr = bo; - if (gobj) { - mutex_lock(&bo->rdev->gem.mutex); - list_add_tail(&bo->list, &rdev->gem.objects); - mutex_unlock(&bo->rdev->gem.mutex); - } + 
trace_radeon_bo_create(bo); + return 0; } @@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev) void radeon_bo_force_delete(struct radeon_device *rdev) { struct radeon_bo *bo, *n; - struct drm_gem_object *gobj; if (list_empty(&rdev->gem.objects)) { return; @@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev) dev_err(rdev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { mutex_lock(&rdev->ddev->struct_mutex); - gobj = bo->gobj; dev_err(rdev->dev, "%p %p %lu %lu force free\n", - gobj, bo, (unsigned long)gobj->size, - *((unsigned long *)&gobj->refcount)); + &bo->gem_base, bo, (unsigned long)bo->gem_base.size, + *((unsigned long *)&bo->gem_base.refcount)); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_unref(&bo); - gobj->driver_private = NULL; - drm_gem_object_unreference(gobj); + drm_gem_object_unreference(&bo->gem_base); mutex_unlock(&rdev->ddev->struct_mutex); } } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 22d4c237dea5..7f8e778dba46 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, } extern int radeon_bo_create(struct radeon_device *rdev, - struct drm_gem_object *gobj, unsigned long size, - int byte_align, - bool kernel, u32 domain, - struct radeon_bo **bo_ptr); + unsigned long size, int byte_align, + bool kernel, u32 domain, + struct radeon_bo **bo_ptr); extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); extern void radeon_bo_unref(struct radeon_bo **bo); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 06e79822a2bf..992d99d13fc5 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) return 0; INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); /* Allocate 1M object buffer */ - r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, + r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->ib_pool.robj); if (r) { @@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.ring_size = ring_size; /* Allocate ring buffer */ if (rdev->cp.ring_obj == NULL) { - r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true, + r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->cp.ring_obj); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 5b44f652145c..dee4a0c1b4b2 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev) goto out_cleanup; } - r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); @@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev) void **gtt_start, **gtt_end; void **vram_start, **vram_end; - r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, gtt_obj + i); if (r) { 
DRM_ERROR("Failed to create GTT object %d\n", i); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c345e899e881..177adc884b74 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -530,7 +530,7 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } - r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true, + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->stollen_vga_memory); if (r) { diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 2211a323db41..3a95999d2fef 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -999,7 +999,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev) u64 gpu_addr; if (rdev->vram_scratch.robj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, + r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.robj); if (r) { diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index e39177778601..b1e7809e5e15 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -42,23 +42,25 @@ #endif struct drm_mm_node { - struct list_head free_stack; struct list_head node_list; - unsigned free : 1; + struct list_head hole_stack; + unsigned hole_follows : 1; unsigned scanned_block : 1; unsigned scanned_prev_free : 1; unsigned scanned_next_free : 1; + unsigned scanned_preceeds_hole : 1; + unsigned allocated : 1; unsigned long start; unsigned long size; struct drm_mm *mm; }; struct drm_mm { - /* List of free memory blocks, most recently freed ordered. */ - struct list_head free_stack; - /* List of all memory nodes, ordered according to the (increasing) start - * address of the memory node. */ - struct list_head node_list; + /* List of all memory nodes that immediatly preceed a free hole. */ + struct list_head hole_stack; + /* head_node.node_list is the list of all memory nodes, ordered + * according to the (increasing) start address of the memory node. */ + struct drm_mm_node head_node; struct list_head unused_nodes; int num_unused; spinlock_t unused_lock; @@ -70,8 +72,28 @@ struct drm_mm { unsigned scanned_blocks; unsigned long scan_start; unsigned long scan_end; + struct drm_mm_node *prev_scanned_node; }; +static inline bool drm_mm_node_allocated(struct drm_mm_node *node) +{ + return node->allocated; +} + +static inline bool drm_mm_initialized(struct drm_mm *mm) +{ + return mm->hole_stack.next; +} +#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ + &(mm)->head_node.node_list, \ + node_list); +#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \ + for (entry = (mm)->prev_scanned_node, \ + next = entry ? list_entry(entry->node_list.next, \ + struct drm_mm_node, node_list) : NULL; \ + entry != NULL; entry = next, \ + next = entry ? 
list_entry(entry->node_list.next, \ + struct drm_mm_node, node_list) : NULL) \ /* * Basic range manager support (drm_mm.c) */ @@ -118,7 +140,15 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range( return drm_mm_get_block_range_generic(parent, size, alignment, start, end, 1); } +extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, + unsigned long size, unsigned alignment); +extern int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + unsigned long size, unsigned alignment, + unsigned long start, unsigned long end); extern void drm_mm_put_block(struct drm_mm_node *cur); +extern void drm_mm_remove_node(struct drm_mm_node *node); +extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, @@ -134,11 +164,6 @@ extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); extern void drm_mm_takedown(struct drm_mm *mm); extern int drm_mm_clean(struct drm_mm *mm); -extern unsigned long drm_mm_tail_space(struct drm_mm *mm); -extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, - unsigned long size); -extern int drm_mm_add_space_to_tail(struct drm_mm *mm, - unsigned long size, int atomic); extern int drm_mm_pre_get(struct drm_mm *mm); static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
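Usage sketch (not part of the patch): the drm_mm.c changes above replace the old drm_mm_get_block()/drm_mm_put_block() pattern, where the allocator kmalloc'ed a struct drm_mm_node per allocation, with entry points that operate on a caller-owned, embedded node. A minimal example of how a driver would use the new API; "struct foo_obj" and the 64 KiB size are hypothetical, only the drm_mm_* calls come from this patch:

	/* Driver object with an embedded range-manager node. */
	struct foo_obj {
		struct drm_mm_node mm_node;	/* no separate kmalloc needed */
		/* ... driver-private state ... */
	};

	static int foo_obj_bind(struct drm_mm *mm, struct foo_obj *obj)
	{
		/* mm_node must be cleared before insertion, per the
		 * drm_mm_insert_node() kerneldoc above. */
		return drm_mm_insert_node(mm, &obj->mm_node, 64 * 1024, 0);
	}

	static void foo_obj_unbind(struct foo_obj *obj)
	{
		if (drm_mm_node_allocated(&obj->mm_node))
			drm_mm_remove_node(&obj->mm_node);
	}

drm_mm_insert_node() returns -ENOSPC when no hole with the requested size and alignment exists; on success the node is linked into the allocator's node_list and hole_stack bookkeeping. drm_mm_remove_node() unlinks it again without freeing anything, since the node's storage belongs to the embedding driver object.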