ext4: clean up inode bitmaps manipulation in ext4_free_inode

- Reorganize the locking scheme so that two atomic operations are batched
  into one. This also allows us to state that a healthy group must obey
  the following rule (illustrated by the sketch after this list):
  ext4_free_inodes_count(sb, gdp) == ext4_count_free(inode_bitmap, NUM);
- Fix a possible undefined pointer dereference.
- Update the inode bitmap even if the group descriptor stats are not
  accessible.
- Move updates of non-group members out of group_lock.
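
To make the rule concrete, here is a minimal user-space sketch (not part of
this patch; the toy_group structure and the helpers count_free() and
group_free_inode() are invented for illustration). It models a group as a
bitmap plus a free-inode counter and shows that when the bit clear and the
counter increment happen under one lock, the counter always matches the
number of zero bits in the bitmap:

/* Illustration only: a user-space model of the invariant
 * ext4_free_inodes_count(sb, gdp) == ext4_count_free(inode_bitmap, NUM).
 * The names below are invented for this sketch; they are not ext4 APIs.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define INODES_PER_GROUP 64		/* toy group size */

struct toy_group {
	pthread_mutex_t lock;		/* stands in for ext4_lock_group() */
	unsigned char bitmap[INODES_PER_GROUP / 8]; /* bit set = inode in use */
	unsigned int free_count;	/* stands in for bg_free_inodes_count */
};

/* Count zero bits in the bitmap, the role ext4_count_free() plays for a
 * real group's inode bitmap. */
static unsigned int count_free(const unsigned char *bitmap, unsigned int nbytes)
{
	unsigned int i, nfree = 0;

	for (i = 0; i < nbytes * 8; i++)
		if (!(bitmap[i / 8] & (1 << (i % 8))))
			nfree++;
	return nfree;
}

/* Clear the bit and bump the counter under one lock, as the patch does,
 * so the invariant holds whenever the lock is not held. */
static void group_free_inode(struct toy_group *g, unsigned int bit)
{
	pthread_mutex_lock(&g->lock);
	g->bitmap[bit / 8] &= ~(1 << (bit % 8));
	g->free_count++;
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct toy_group g = { .lock = PTHREAD_MUTEX_INITIALIZER };

	memset(g.bitmap, 0xff, sizeof(g.bitmap));	/* all inodes in use */
	g.free_count = 0;

	group_free_inode(&g, 5);
	group_free_inode(&g, 42);

	/* A "healthy" group obeys the rule from the commit message. */
	assert(g.free_count == count_free(g.bitmap, sizeof(g.bitmap)));
	printf("free_count=%u, bitmap free=%u\n",
	       g.free_count, count_free(g.bitmap, sizeof(g.bitmap)));
	return 0;
}

Before this patch the bit was cleared in one locked atomic operation and the
group counters were updated under a separate ext4_lock_group() section, so a
reader could observe the two out of sync in between; batching both into a
single locked section is what lets the rule above be stated for any healthy
group.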

Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
commit d17413c08c
parent 21ca087a38
Author: Dmitry Monakhov <dmonakhov@openvz.org>, 2010-05-16 07:00:00 -04:00
Committer: Theodore Ts'o <tytso@mit.edu>
1 changed file with 40 additions and 47 deletions

@@ -240,56 +240,49 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	if (fatal)
 		goto error_return;
 
-	/* Ok, now we can actually update the inode bitmaps.. */
-	cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
-					bit, bitmap_bh->b_data);
-	if (!cleared)
-		ext4_error(sb, "bit already cleared for inode %lu", ino);
-	else {
-		gdp = ext4_get_group_desc(sb, block_group, &bh2);
-
+	fatal = -ESRCH;
+	gdp = ext4_get_group_desc(sb, block_group, &bh2);
+	if (gdp) {
 		BUFFER_TRACE(bh2, "get_write_access");
 		fatal = ext4_journal_get_write_access(handle, bh2);
-		if (fatal) goto error_return;
-
-		if (gdp) {
-			ext4_lock_group(sb, block_group);
-			count = ext4_free_inodes_count(sb, gdp) + 1;
-			ext4_free_inodes_set(sb, gdp, count);
-			if (is_directory) {
-				count = ext4_used_dirs_count(sb, gdp) - 1;
-				ext4_used_dirs_set(sb, gdp, count);
-				if (sbi->s_log_groups_per_flex) {
-					ext4_group_t f;
-
-					f = ext4_flex_group(sbi, block_group);
-					atomic_dec(&sbi->s_flex_groups[f].used_dirs);
-				}
-			}
-			gdp->bg_checksum = ext4_group_desc_csum(sbi,
-							block_group, gdp);
-			ext4_unlock_group(sb, block_group);
-			percpu_counter_inc(&sbi->s_freeinodes_counter);
-			if (is_directory)
-				percpu_counter_dec(&sbi->s_dirs_counter);
-
-			if (sbi->s_log_groups_per_flex) {
-				ext4_group_t f;
-
-				f = ext4_flex_group(sbi, block_group);
-				atomic_inc(&sbi->s_flex_groups[f].free_inodes);
-			}
-		}
-		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
-		if (!fatal) fatal = err;
-	}
-	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
-	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
-	if (!fatal)
-		fatal = err;
-	sb->s_dirt = 1;
+	}
+	ext4_lock_group(sb, block_group);
+	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+	if (fatal || !cleared) {
+		ext4_unlock_group(sb, block_group);
+		goto out;
+	}
+
+	count = ext4_free_inodes_count(sb, gdp) + 1;
+	ext4_free_inodes_set(sb, gdp, count);
+	if (is_directory) {
+		count = ext4_used_dirs_count(sb, gdp) - 1;
+		ext4_used_dirs_set(sb, gdp, count);
+		percpu_counter_dec(&sbi->s_dirs_counter);
+	}
+	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+	ext4_unlock_group(sb, block_group);
+
+	percpu_counter_inc(&sbi->s_freeinodes_counter);
+	if (sbi->s_log_groups_per_flex) {
+		ext4_group_t f = ext4_flex_group(sbi, block_group);
+
+		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+		if (is_directory)
+			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+	}
+	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+out:
+	if (cleared) {
+		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+		if (!fatal)
+			fatal = err;
+		sb->s_dirt = 1;
+	} else
+		ext4_error(sb, "bit already cleared for inode %lu", ino);
 
 error_return:
 	brelse(bitmap_bh);
 	ext4_std_error(sb, fatal);