Btrfs: Add BH_Defrag to mark buffers that are in need of defragging

This allows the tree walking code to defrag only the newly allocated
buffers; it seems to be a good balance between perfect defragging and the
performance hit of repeatedly reallocating blocks.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
Chris Mason 2007-08-10 14:42:37 -04:00 committed by David Woodhouse
parent e9d0b13b5b
commit f2183bde1a
4 changed files with 18 additions and 11 deletions

View File

@ -175,6 +175,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
int end_slot;
int i;
int err = 0;
int parent_level;
if (trans->transaction != root->fs_info->running_transaction) {
printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
@ -188,6 +189,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
}
parent_node = btrfs_buffer_node(parent);
parent_nritems = btrfs_header_nritems(&parent_node->header);
parent_level = btrfs_header_level(&parent_node->header);
start_slot = 0;
end_slot = parent_nritems;
@ -215,14 +217,17 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
cur_bh = btrfs_find_tree_block(root, blocknr);
if (!cur_bh || !buffer_uptodate(cur_bh) ||
buffer_locked(cur_bh)) {
buffer_locked(cur_bh) || !buffer_defrag(cur_bh)) {
if (cache_only) {
brelse(cur_bh);
continue;
}
if (!cur_bh || !buffer_uptodate(cur_bh) ||
buffer_locked(cur_bh)) {
brelse(cur_bh);
cur_bh = read_tree_block(root, blocknr);
}
}
if (search_start == 0)
search_start = last_block & ~((u64)65535);
@ -232,6 +237,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (err)
break;
search_start = bh_blocknr(tmp_bh);
*last_ret = search_start;
if (parent_level == 1)
clear_buffer_defrag(tmp_bh);
brelse(tmp_bh);
}
return err;
@ -811,16 +819,10 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path,
clear_radix_bit(&found, blocknr);
if (nread > 32)
continue;
if (direction > 0 && cluster_start <= blocknr &&
cluster_start + 8 > blocknr) {
cluster_start = blocknr;
if (close_blocks(cluster_start, blocknr)) {
readahead_tree_block(root, blocknr);
nread++;
} else if (direction < 0 && cluster_start >= blocknr &&
blocknr + 8 > cluster_start) {
cluster_start = blocknr;
readahead_tree_block(root, blocknr);
nread++;
}
}
}

View File

@ -25,8 +25,10 @@
/*
 * Btrfs-private buffer_head state bits, allocated above BH_PrivateStart
 * so they do not collide with the generic BH_* flags.
 */
enum btrfs_bh_state_bits {
BH_Checked = BH_PrivateStart,	/* set once a newly allocated block has been marked checked (see btrfs_alloc_free_block) */
BH_Defrag,	/* set on freshly allocated tree blocks; cleared by the defrag walkers once the block has been processed, so only new blocks are considered for defragging */
};
/* Generate buffer_checked()/set_buffer_checked()/clear_buffer_checked() helpers. */
BUFFER_FNS(Checked, checked);
/* Generate buffer_defrag()/set_buffer_defrag()/clear_buffer_defrag() helpers. */
BUFFER_FNS(Defrag, defrag);
static inline struct btrfs_node *btrfs_buffer_node(struct buffer_head *bh)
{

View File

@ -1015,6 +1015,7 @@ check_failed:
ins->objectid = search_start;
ins->offset = 0;
start_found = 0;
path->reada = 1;
ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
if (ret < 0)
@ -1264,6 +1265,7 @@ struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
WARN_ON(buffer_dirty(buf));
set_buffer_uptodate(buf);
set_buffer_checked(buf);
set_buffer_defrag(buf);
set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
return buf;
}

View File

@ -86,7 +86,7 @@ static int defrag_walk_down(struct btrfs_trans_handle *trans,
if (cache_only) {
next = btrfs_find_tree_block(root, blocknr);
if (!next || !buffer_uptodate(next) ||
buffer_locked(next)) {
buffer_locked(next) || !buffer_defrag(next)) {
brelse(next);
path->slots[*level]++;
continue;
@ -142,6 +142,7 @@ static int defrag_walk_up(struct btrfs_trans_handle *trans,
root->defrag_level = i;
return 0;
} else {
clear_buffer_defrag(path->nodes[*level]);
btrfs_block_release(root, path->nodes[*level]);
path->nodes[*level] = NULL;
*level = i + 1;