Btrfs: Switch the extent buffer rbtree into a radix tree

This patch reduces the CPU time spent in the extent buffer search by using a
radix tree instead of the rbtree, and by taking the RCU read lock instead of
the spin lock on the lookup path.

I did a quick test with the benchmark tool[1] and found that the patch improves
the file creation/deletion performance problem that I reported earlier[2].

Before applying this patch:
Create files:
	Total files: 50000
	Total time: 0.971531 s
	Average time: 0.000019 s
Delete files:
	Total files: 50000
	Total time: 1.366761 s
	Average time: 0.000027 s

After applying this patch:
Create files:
	Total files: 50000
	Total time: 0.927455 s
	Average time: 0.000019 s
Delete files:
	Total files: 50000
	Total time: 1.292280 s
	Average time: 0.000026 s

[1] http://marc.info/?l=linux-btrfs&m=128212635122920&q=p3
[2] http://marc.info/?l=linux-btrfs&m=128212635122920&w=2

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
commit 19fe0a8b78 (parent 897ca6e9b4)
Author:    Miao Xie <miaox@cn.fujitsu.com>
Committer: Chris Mason <chris.mason@oracle.com>
Date:      2010-10-26 20:57:29 -04:00

2 changed files with 51 additions and 71 deletions
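The heart of the change, as described above, is the lookup fast path: readers
previously walked an rbtree under tree->buffer_lock; they now do a lock-free
radix-tree lookup under rcu_read_lock() and take their reference with
atomic_inc_not_zero(), which refuses to resurrect a buffer whose refcount has
already dropped to zero. A condensed, kernel-style sketch of the pattern (the
helper name eb_lookup is mine for illustration; the patch open-codes this in
alloc_extent_buffer() and find_extent_buffer() below):

	#include <linux/radix-tree.h>
	#include <linux/rcupdate.h>

	static struct extent_buffer *eb_lookup(struct extent_io_tree *tree,
					       u64 start)
	{
		struct extent_buffer *eb;

		rcu_read_lock();
		/* lock-free lookup; safe because buffers are freed via RCU */
		eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
		/* take a reference only if the buffer is still live */
		if (eb && atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			return eb;
		}
		rcu_read_unlock();
		return NULL;
	}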

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -104,7 +104,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 			  struct address_space *mapping, gfp_t mask)
 {
 	tree->state = RB_ROOT;
-	tree->buffer = RB_ROOT;
+	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
@@ -235,50 +235,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 	return ret;
 }
 
-static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
-						u64 offset, struct rb_node *node)
-{
-	struct rb_root *root = &tree->buffer;
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct extent_buffer *eb;
-
-	while (*p) {
-		parent = *p;
-		eb = rb_entry(parent, struct extent_buffer, rb_node);
-
-		if (offset < eb->start)
-			p = &(*p)->rb_left;
-		else if (offset > eb->start)
-			p = &(*p)->rb_right;
-		else
-			return eb;
-	}
-
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-	return NULL;
-}
-
-static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
-					   u64 offset)
-{
-	struct rb_root *root = &tree->buffer;
-	struct rb_node *n = root->rb_node;
-	struct extent_buffer *eb;
-
-	while (n) {
-		eb = rb_entry(n, struct extent_buffer, rb_node);
-		if (offset < eb->start)
-			n = n->rb_left;
-		else if (offset > eb->start)
-			n = n->rb_right;
-		else
-			return eb;
-	}
-	return NULL;
-}
-
 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
@@ -3082,6 +3038,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	eb->len = len;
 	spin_lock_init(&eb->lock);
 	init_waitqueue_head(&eb->lock_wq);
+	INIT_RCU_HEAD(&eb->rcu_head);
 
 #if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
@@ -3150,16 +3107,16 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	struct page *p;
 	struct address_space *mapping = tree->mapping;
 	int uptodate = 1;
+	int ret;
 
-	spin_lock(&tree->buffer_lock);
-	eb = buffer_search(tree, start);
-	if (eb) {
-		atomic_inc(&eb->refs);
-		spin_unlock(&tree->buffer_lock);
+	rcu_read_lock();
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	if (eb && atomic_inc_not_zero(&eb->refs)) {
+		rcu_read_unlock();
 		mark_page_accessed(eb->first_page);
 		return eb;
 	}
-	spin_unlock(&tree->buffer_lock);
+	rcu_read_unlock();
 
 	eb = __alloc_extent_buffer(tree, start, len, mask);
 	if (!eb)
@@ -3198,17 +3155,25 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
+	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+	if (ret)
+		goto free_eb;
+
 	spin_lock(&tree->buffer_lock);
-	exists = buffer_tree_insert(tree, start, &eb->rb_node);
-	if (exists) {
+	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
+	if (ret == -EEXIST) {
+		exists = radix_tree_lookup(&tree->buffer,
+						start >> PAGE_CACHE_SHIFT);
 		/* add one reference for the caller */
 		atomic_inc(&exists->refs);
 		spin_unlock(&tree->buffer_lock);
+		radix_tree_preload_end();
 		goto free_eb;
 	}
 	/* add one reference for the tree */
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
+	radix_tree_preload_end();
 	return eb;
 
 free_eb:
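A note on the insert path above: radix_tree_insert() may need to allocate tree
nodes, which is not allowed under the spinlock, so the patch uses the kernel's
standard preload idiom. radix_tree_preload() preallocates per-CPU nodes and
disables preemption on success; the insert under the lock then cannot fail on
allocation, and radix_tree_preload_end() re-enables preemption. In generic
form (lock, tree, index and item here are placeholders, not names from the
patch):

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		return ret;			/* could not preallocate */

	spin_lock(&lock);
	ret = radix_tree_insert(&tree, index, item);	/* will not sleep */
	spin_unlock(&lock);
	radix_tree_preload_end();		/* re-enable preemption */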
@@ -3224,16 +3189,16 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 {
 	struct extent_buffer *eb;
 
-	spin_lock(&tree->buffer_lock);
-	eb = buffer_search(tree, start);
-	if (eb)
-		atomic_inc(&eb->refs);
-	spin_unlock(&tree->buffer_lock);
-
-	if (eb)
-		mark_page_accessed(eb->first_page);
-
-	return eb;
+	rcu_read_lock();
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	if (eb && atomic_inc_not_zero(&eb->refs)) {
+		rcu_read_unlock();
+		mark_page_accessed(eb->first_page);
+		return eb;
+	}
+	rcu_read_unlock();
+
+	return NULL;
 }
 
 void free_extent_buffer(struct extent_buffer *eb)
@@ -3863,6 +3828,14 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	}
 }
 
+static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+{
+	struct extent_buffer *eb =
+			container_of(head, struct extent_buffer, rcu_head);
+
+	btrfs_release_extent_buffer(eb);
+}
+
 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = page_offset(page);
@@ -3870,23 +3843,30 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 	int ret = 1;
 
 	spin_lock(&tree->buffer_lock);
-	eb = buffer_search(tree, start);
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 	if (!eb)
 		goto out;
 
-	if (atomic_read(&eb->refs) > 1) {
-		ret = 0;
-		goto out;
-	}
 	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
 		ret = 0;
 		goto out;
 	}
 
-	rb_erase(&eb->rb_node, &tree->buffer);
-	/* at this point we can safely release the extent buffer */
-	btrfs_release_extent_buffer(eb);
+	/*
+	 * set @eb->refs to 0 if it is already 1, and then release the @eb.
+	 * Or go back.
+	 */
+	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
+		ret = 0;
+		goto out;
+	}
+
+	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 out:
 	spin_unlock(&tree->buffer_lock);
+
+	/* at this point we can safely release the extent buffer */
+	if (atomic_read(&eb->refs) == 0)
+		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
 	return ret;
 }
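The release side is the other half of the handshake: a buffer may be torn down
only after its refcount is atomically driven from 1 to 0 under buffer_lock, so
no concurrent lookup can have taken a new reference, and even then the actual
free is deferred with call_rcu() until every reader that may still hold a
pointer from radix_tree_lookup() has left its RCU read-side section. Boiled
down (a simplification of the hunk above, omitting the dirty-buffer check and
error paths):

	spin_lock(&tree->buffer_lock);
	/* succeeds only if we hold the sole remaining reference */
	if (atomic_cmpxchg(&eb->refs, 1, 0) == 1)
		radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	spin_unlock(&tree->buffer_lock);

	if (atomic_read(&eb->refs) == 0)	/* free after a grace period */
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);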

diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -85,7 +85,7 @@ struct extent_io_ops {
 
 struct extent_io_tree {
 	struct rb_root state;
-	struct rb_root buffer;
+	struct radix_tree_root buffer;
 	struct address_space *mapping;
 	u64 dirty_bytes;
 	spinlock_t lock;
@@ -123,7 +123,7 @@ struct extent_buffer {
 	unsigned long bflags;
 	atomic_t refs;
 	struct list_head leak_list;
-	struct rb_node rb_node;
+	struct rcu_head rcu_head;
 
 	/* the spinlock is used to protect most operations */
 	spinlock_t lock;