for-5.2-rc1-tag
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAlzi2E8ACgkQxWXV+ddt
WDuwdg/9Gil8uC28r7HLk1DkMdUZp6qHPXC2D79iN63XOIyxtTv2Y/ZDOneHheTa
NaW9DOe6PUWoVyrYRCM/BhRxouZp0cFlpMG1m8ABdaO3uSCzwlc9wHs7YPNOwiGJ
DM3qikX4V8w0ECoY3Z9NzbHLGTi9INzgkuazWGQnplK1ZA7CHe4RLH1r442daTAO
iFr+bhjODmwHyebXlK66dcOGw7HXp4ac+iyZnlivNcTipGtOTdA7kryZLaNmfepz
JfMESxGMrLhdrd/YxeaDEVYRAh1ZSD57/WGrQDeRQ54qD2ELXmoPX0rAtquwoziS
F1PSitiW0DzYGjS+KCKP9553tlEtJ5Md45k0AibK4h/aqCPy6s6khK/PfsHQT5K+
lD0CqwB4zr9zOhS0n1uFRlNomzK4UZ2SPDtB4KMpCCEQLlvwJIkUqb3Bx6JZgAEH
FPFEZGVX/Xyqv6w/VASHHhhoAGRJ/mIx+mU/RGVU+jFVBzwd0EmlCymFDMF2z44K
8HZz7ib4fMvArR5S2uEz/h85JM7EzDG7YkPluzERiQy86Abi79QQl8qWfC7yBGYd
K3g6VQM/H6NUprXqTNQ/NU7Zvrq5HPXC+NhrLvC+Ul0DlwLAwxRj8NeYImUuDDpi
Du49hJcV0U2kWocvwdP+600y48UroioJHlqKtqlng3NKxdjUGxw=
=qN6T
-----END PGP SIGNATURE-----

Merge tag 'for-5.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "Notable highlights:

   - fixes for some long-standing bugs in fsync that were quite hard to
     catch but are now finally fixed

   - some fixups to error handling paths that did not properly clean up
     (locking, memory)

   - fix to space reservation for inheriting properties"

* tag 'for-5.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  Btrfs: tree-checker: detect file extent items with overlapping ranges
  Btrfs: fix race between ranged fsync and writeback of adjacent ranges
  Btrfs: avoid fallback to transaction commit during fsync of files with holes
  btrfs: extent-tree: Fix a bug that btrfs is unable to add pinned bytes
  btrfs: sysfs: don't leak memory when failing add fsid
  btrfs: sysfs: Fix error path kobject memory leak
  Btrfs: do not abort transaction at btrfs_update_root() after failure to COW path
  btrfs: use the existing reserved items for our first prop for inheritance
  btrfs: don't double unlock on error in btrfs_punch_hole
  btrfs: Check the compression level before getting a workspace
commit f49aa1de98
fs/btrfs/compression.c

@@ -1008,6 +1008,7 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
 	struct list_head *workspace;
 	int ret;
 
+	level = btrfs_compress_op[type]->set_level(level);
 	workspace = get_workspace(type, level);
 	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
 						      start, pages,
fs/btrfs/extent-tree.c

@@ -757,12 +757,14 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 }
 
 static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
-			     struct btrfs_ref *ref)
+			     struct btrfs_ref *ref, int sign)
 {
 	struct btrfs_space_info *space_info;
-	s64 num_bytes = -ref->len;
+	s64 num_bytes;
 	u64 flags;
 
+	ASSERT(sign == 1 || sign == -1);
+	num_bytes = sign * ref->len;
 	if (ref->type == BTRFS_REF_METADATA) {
 		if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
 			flags = BTRFS_BLOCK_GROUP_SYSTEM;

@@ -2063,7 +2065,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 		btrfs_ref_tree_mod(fs_info, generic_ref);
 
 	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
-		add_pinned_bytes(fs_info, generic_ref);
+		add_pinned_bytes(fs_info, generic_ref, -1);
 
 	return ret;
 }

@@ -3882,8 +3884,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
 				    info->space_info_kobj, "%s",
 				    alloc_name(space_info->flags));
 	if (ret) {
-		percpu_counter_destroy(&space_info->total_bytes_pinned);
-		kfree(space_info);
+		kobject_put(&space_info->kobj);
 		return ret;
 	}
 

@@ -7190,7 +7191,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 	}
 out:
 	if (pin)
-		add_pinned_bytes(fs_info, &generic_ref);
+		add_pinned_bytes(fs_info, &generic_ref, 1);
 
 	if (last_ref) {
 		/*

@@ -7238,7 +7239,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
 		btrfs_ref_tree_mod(fs_info, ref);
 
 	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
-		add_pinned_bytes(fs_info, ref);
+		add_pinned_bytes(fs_info, ref, 1);
 
 	return ret;
 }
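As the hunks above suggest, the earlier refactor had hardcoded num_bytes = -ref->len, so add_pinned_bytes() could only ever subtract from the pinned-bytes counters; restoring the sign argument is what the "unable to add pinned bytes" entry in the shortlog refers to. The freeing paths (btrfs_free_tree_block() and btrfs_free_extent()) now pass 1 to account newly pinned bytes, while btrfs_inc_extent_ref() passes -1 to undo that accounting when an extent that was headed for freeing gains a reference again.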
fs/btrfs/file.c

@@ -2067,6 +2067,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	int ret = 0, err;
 	u64 len;
 
+	/*
+	 * If the inode needs a full sync, make sure we use a full range to
+	 * avoid log tree corruption, due to hole detection racing with ordered
+	 * extent completion for adjacent ranges, and assertion failures during
+	 * hole detection.
+	 */
+	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+		     &BTRFS_I(inode)->runtime_flags)) {
+		start = 0;
+		end = LLONG_MAX;
+	}
+
 	/*
 	 * The range length can be represented by u64, we have to do the typecasts
 	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()

@@ -2554,10 +2566,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
 					  &cached_state);
-	if (ret) {
-		inode_unlock(inode);
+	if (ret)
 		goto out_only_mutex;
-	}
 
 	path = btrfs_alloc_path();
 	if (!path) {
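The btrfs_sync_file() hunk widens the range to [0, LLONG_MAX] for full syncs, which is exactly the case the pre-existing comment about u64 typecasts guards against. A minimal user-space illustration (assumed values, not kernel code) of why the length has to be computed in unsigned 64-bit arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
    long long start = 0;
    long long end = LLONG_MAX;  /* the full-sync case set up above */

    /* Computing end - start + 1 in signed arithmetic would overflow;
     * casting to u64 first yields the intended length of 2^63 bytes. */
    uint64_t len = (uint64_t)end - (uint64_t)start + 1;

    printf("len = %llu\n", (unsigned long long)len);  /* 9223372036854775808 */
    return 0;
}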
fs/btrfs/props.c

@@ -332,6 +332,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int i;
+	bool need_reserve = false;
 
 	if (!test_bit(BTRFS_INODE_HAS_PROPS,
 		      &BTRFS_I(parent)->runtime_flags))

@@ -357,11 +358,20 @@ static int inherit_props(struct btrfs_trans_handle *trans,
 		if (ret)
 			continue;
 
-		num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
-		ret = btrfs_block_rsv_add(root, trans->block_rsv,
-					  num_bytes, BTRFS_RESERVE_NO_FLUSH);
-		if (ret)
-			return ret;
+		/*
+		 * Currently callers should be reserving 1 item for properties,
+		 * since we only have 1 property that we currently support. If
+		 * we add more in the future we need to try and reserve more
+		 * space for them. But we should also revisit how we do space
+		 * reservations if we do add more properties in the future.
+		 */
+		if (need_reserve) {
+			num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+			ret = btrfs_block_rsv_add(root, trans->block_rsv,
+					num_bytes, BTRFS_RESERVE_NO_FLUSH);
+			if (ret)
+				return ret;
+		}
 
 		ret = btrfs_setxattr(trans, inode, h->xattr_name, value,
 				     strlen(value), 0);

@@ -375,9 +385,13 @@ static int inherit_props(struct btrfs_trans_handle *trans,
 				  &BTRFS_I(inode)->runtime_flags);
 		}
 
-		btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
-		if (ret)
-			return ret;
+		if (need_reserve) {
+			btrfs_block_rsv_release(fs_info, trans->block_rsv,
+					num_bytes);
+			if (ret)
+				return ret;
+		}
+		need_reserve = true;
 	}
 
 	return 0;
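The net effect of the need_reserve flag in the props.c hunks: the first inherited property consumes the one metadata item the caller is already expected to have reserved with the transaction (per the added comment), and only subsequent iterations reserve, and later release, an extra item themselves, instead of unconditionally reserving on every pass as before.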
fs/btrfs/root-tree.c

@@ -132,10 +132,8 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 		return -ENOMEM;
 
 	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-	if (ret < 0) {
-		btrfs_abort_transaction(trans, ret);
+	if (ret < 0)
 		goto out;
-	}
 
 	if (ret > 0) {
 		btrfs_crit(fs_info,
fs/btrfs/sysfs.c

@@ -825,7 +825,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
 	fs_devs->fsid_kobj.kset = btrfs_kset;
 	error = kobject_init_and_add(&fs_devs->fsid_kobj,
 				&btrfs_ktype, parent, "%pU", fs_devs->fsid);
-	return error;
+	if (error) {
+		kobject_put(&fs_devs->fsid_kobj);
+		return error;
+	}
+
+	return 0;
 }
 
 int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
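Both sysfs fixes (the create_space_info() hunk above and btrfs_sysfs_add_fsid() here) apply the same kobject lifetime rule: once kobject_init_and_add() has run, even a failed add leaves an initialized, refcounted kobject behind, so the error path must drop it with kobject_put(), which invokes the ktype's release callback and frees the memory, rather than calling kfree() directly or leaking it. A minimal sketch of that pattern, using hypothetical foo_* names rather than btrfs code:

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	int value;
};

/* Runs when the last reference to the kobject is dropped. */
static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

static int foo_register(struct foo *foo, struct kobject *parent)
{
	int ret;

	ret = kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo");
	if (ret)
		kobject_put(&foo->kobj);	/* frees foo via foo_release() */
	return ret;
}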
fs/btrfs/tree-checker.c

@@ -107,8 +107,26 @@ static void file_extent_err(const struct extent_buffer *eb, int slot,
 	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \
 })
 
+static u64 file_extent_end(struct extent_buffer *leaf,
+			   struct btrfs_key *key,
+			   struct btrfs_file_extent_item *extent)
+{
+	u64 end;
+	u64 len;
+
+	if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
+		len = btrfs_file_extent_ram_bytes(leaf, extent);
+		end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
+	} else {
+		len = btrfs_file_extent_num_bytes(leaf, extent);
+		end = key->offset + len;
+	}
+	return end;
+}
+
 static int check_extent_data_item(struct extent_buffer *leaf,
-				  struct btrfs_key *key, int slot)
+				  struct btrfs_key *key, int slot,
+				  struct btrfs_key *prev_key)
 {
 	struct btrfs_fs_info *fs_info = leaf->fs_info;
 	struct btrfs_file_extent_item *fi;

@@ -188,6 +206,28 @@ static int check_extent_data_item(struct extent_buffer *leaf,
 	    CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
 	    CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
 		return -EUCLEAN;
+
+	/*
+	 * Check that no two consecutive file extent items, in the same leaf,
+	 * present ranges that overlap each other.
+	 */
+	if (slot > 0 &&
+	    prev_key->objectid == key->objectid &&
+	    prev_key->type == BTRFS_EXTENT_DATA_KEY) {
+		struct btrfs_file_extent_item *prev_fi;
+		u64 prev_end;
+
+		prev_fi = btrfs_item_ptr(leaf, slot - 1,
+					 struct btrfs_file_extent_item);
+		prev_end = file_extent_end(leaf, prev_key, prev_fi);
+		if (prev_end > key->offset) {
+			file_extent_err(leaf, slot - 1,
+"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
+					prev_end, key->offset);
+			return -EUCLEAN;
+		}
+	}
+
 	return 0;
 }
 

@@ -774,14 +814,15 @@ static int check_inode_item(struct extent_buffer *leaf,
  * Common point to switch the item-specific validation.
  */
 static int check_leaf_item(struct extent_buffer *leaf,
-			   struct btrfs_key *key, int slot)
+			   struct btrfs_key *key, int slot,
+			   struct btrfs_key *prev_key)
 {
 	int ret = 0;
 	struct btrfs_chunk *chunk;
 
 	switch (key->type) {
 	case BTRFS_EXTENT_DATA_KEY:
-		ret = check_extent_data_item(leaf, key, slot);
+		ret = check_extent_data_item(leaf, key, slot, prev_key);
 		break;
 	case BTRFS_EXTENT_CSUM_KEY:
 		ret = check_csum_item(leaf, key, slot);

@@ -928,7 +969,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
 		 * Check if the item size and content meet other
 		 * criteria
 		 */
-		ret = check_leaf_item(leaf, &key, slot);
+		ret = check_leaf_item(leaf, &key, slot, &prev_key);
 		if (ret < 0)
 			return ret;
 	}
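A quick worked example of the new overlap check, with illustrative numbers only: for an inline extent, file_extent_end() rounds key->offset + ram_bytes up to the sector size, so with a 4K sector an inline extent holding 100 bytes at offset 0 "ends" at 4096, and any following EXTENT_DATA item of the same inode whose key offset is below that now fails with -EUCLEAN. A tiny user-space sketch of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Same rounding the kernel's ALIGN() macro performs for a power-of-two 'a'. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t sectorsize = 4096;
    uint64_t key_offset = 0;    /* inline extents always sit at file offset 0 */
    uint64_t ram_bytes = 100;   /* uncompressed inline payload */
    uint64_t prev_end = ALIGN_UP(key_offset + ram_bytes, sectorsize);
    uint64_t next_offset = 2048;    /* hypothetical next EXTENT_DATA key offset */

    printf("prev_end = %llu\n", (unsigned long long)prev_end);  /* 4096 */
    if (prev_end > next_offset)
        printf("overlap -> tree checker would return -EUCLEAN\n");
    return 0;
}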
fs/btrfs/tree-log.c

@@ -4182,6 +4182,7 @@ fill_holes:
 						       *last_extent, 0,
 						       0, len, 0, len,
 						       0, 0, 0);
+			*last_extent += len;
 		}
 	}
 }