btrfs: trace: Introduce trace events for all btrfs tree locking events
Unlike btrfs_tree_lock() and btrfs_tree_read_lock(), the remaining functions in locking.c do not sleep, so it doesn't make much sense to record their execution time. These events are introduced mainly for user space tools to audit and detect lock leakage or deadlocks.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 34e73cc930
commit 31aab40207
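The commit message mentions user space tooling. As a minimal, hypothetical sketch (not part of this patch), the new tracepoints could be switched on from user space through tracefs before running such an audit. The paths below assume tracefs is mounted at /sys/kernel/tracing and that the events added in this diff appear under events/btrfs/:

#include <stdio.h>

/* Enable one btrfs locking tracepoint by writing "1" to its per-event
 * enable file.  Each DEFINE_BTRFS_LOCK_EVENT() in the diff below creates
 * a matching events/btrfs/<name>/ directory. */
static int enable_event(const char *name)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/tracing/events/btrfs/%s/enable", name);
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return -1;
        }
        fputs("1\n", f);
        fclose(f);
        return 0;
}

int main(void)
{
        /* Two of the events introduced by this commit. */
        enable_event("btrfs_tree_unlock");
        enable_event("btrfs_try_tree_write_lock");
        return 0;
}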
fs/btrfs/locking.c

@@ -87,6 +87,7 @@ static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
 void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
 {
+        trace_btrfs_set_lock_blocking_read(eb);
         /*
          * No lock is required. The lock owner may change if we have a read
          * lock, but it won't change to or away from us. If we have the write
@@ -102,6 +103,7 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
 void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 {
+        trace_btrfs_set_lock_blocking_write(eb);
         /*
          * No lock is required. The lock owner may change if we have a read
          * lock, but it won't change to or away from us. If we have the write
@@ -119,6 +121,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
 {
+        trace_btrfs_clear_lock_blocking_read(eb);
         /*
          * No lock is required. The lock owner may change if we have a read
          * lock, but it won't change to or away from us. If we have the write
@@ -136,6 +139,7 @@ void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
 void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 {
+        trace_btrfs_clear_lock_blocking_write(eb);
         /*
          * no lock is required. The lock owner may change if
          * we have a read lock, but it won't change to or away
@@ -209,6 +213,7 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
         }
         btrfs_assert_tree_read_locks_get(eb);
         btrfs_assert_spinning_readers_get(eb);
+        trace_btrfs_tree_read_lock_atomic(eb);
         return 1;
 }
@@ -230,6 +235,7 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
         }
         btrfs_assert_tree_read_locks_get(eb);
         btrfs_assert_spinning_readers_get(eb);
+        trace_btrfs_try_tree_read_lock(eb);
         return 1;
 }
@@ -252,6 +258,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
         btrfs_assert_tree_write_locks_get(eb);
         btrfs_assert_spinning_writers_get(eb);
         eb->lock_owner = current->pid;
+        trace_btrfs_try_tree_write_lock(eb);
         return 1;
 }
@@ -260,6 +267,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+        trace_btrfs_tree_read_unlock(eb);
         /*
          * if we're nested, we have the write lock. No new locking
          * is needed as long as we are the lock owner.
@@ -281,6 +289,7 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+        trace_btrfs_tree_read_unlock_blocking(eb);
         /*
          * if we're nested, we have the write lock. No new locking
          * is needed as long as we are the lock owner.
@@ -336,6 +345,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
         BUG_ON(blockers > 1);

         btrfs_assert_tree_locked(eb);
+        trace_btrfs_tree_unlock(eb);
         eb->lock_owner = 0;
         btrfs_assert_tree_write_locks_put(eb);
include/trace/events/btrfs.h

@@ -2049,6 +2049,48 @@ DEFINE_EVENT(btrfs_sleep_tree_lock, btrfs_tree_lock,
         TP_ARGS(eb, start_ns)
 );

+DECLARE_EVENT_CLASS(btrfs_locking_events,
+        TP_PROTO(const struct extent_buffer *eb),
+
+        TP_ARGS(eb),
+
+        TP_STRUCT__entry_btrfs(
+                __field( u64, block )
+                __field( u64, generation )
+                __field( u64, owner )
+                __field( int, is_log_tree )
+        ),
+
+        TP_fast_assign_btrfs(eb->fs_info,
+                __entry->block = eb->start;
+                __entry->generation = btrfs_header_generation(eb);
+                __entry->owner = btrfs_header_owner(eb);
+                __entry->is_log_tree = (eb->log_index >= 0);
+        ),
+
+        TP_printk_btrfs("block=%llu generation=%llu owner=%llu is_log_tree=%d",
+                        __entry->block, __entry->generation,
+                        __entry->owner, __entry->is_log_tree)
+);
+
+#define DEFINE_BTRFS_LOCK_EVENT(name)                          \
+DEFINE_EVENT(btrfs_locking_events, name,                       \
+        TP_PROTO(const struct extent_buffer *eb),              \
+                                                               \
+        TP_ARGS(eb)                                            \
+)
+
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_read);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_write);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
+
 #endif /* _TRACE_BTRFS_H */

 /* This part must be outside protection */
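For completeness, a minimal sketch of how a user space audit tool might consume these events once enabled; this is illustrative only and not part of the commit. It assumes tracefs is mounted at /sys/kernel/tracing and simply echoes btrfs locking event lines, whose per-event text follows the TP_printk_btrfs() format defined above:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* trace_pipe blocks until new trace data arrives and consumes
         * the data as it is read. */
        FILE *f = fopen("/sys/kernel/tracing/trace_pipe", "r");
        char line[512];

        if (!f) {
                perror("trace_pipe");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* Crude filter: keep only the btrfs locking events,
                 * e.g. btrfs_tree_unlock or btrfs_try_tree_read_lock. */
                if (strstr(line, "btrfs_tree_") ||
                    strstr(line, "btrfs_try_tree_") ||
                    strstr(line, "btrfs_set_lock_blocking_") ||
                    strstr(line, "btrfs_clear_lock_blocking_"))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}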