btf_encoder: Match ftrace addresses within ELF functions

Currently when processing a DWARF function, we check its entrypoint
against ftrace addresses, assuming that the ftrace address matches with
the function's entrypoint.

This is not the case on some architectures as reported by Nathan
when building the kernel on arm [1].

Fix the check to take into account the whole function, not just the
entrypoint.

Most of the is_ftrace_func code was contributed by Andrii.

[1] https://lore.kernel.org/bpf/20210209034416.GA1669105@ubuntu-m3-large-x86/

Committer notes:

Test comments by Nathan:

"I did several builds with CONFIG_DEBUG_INFO_BTF enabled (arm64, ppc64le,
 and x86_64) and saw no build errors. I did not do any runtime testing."

Test comments by Sedat:

Linux v5.11-rc7+ and LLVM/Clang v12.0.0-rc1 on x86 (64bit)

Reported-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andriin@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Hao Luo <haoluo@google.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@chromium.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: bpf@vger.kernel.org
Cc: dwarves@vger.kernel.org
Cc: netdev@vger.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
Jiri Olsa 2021-02-13 17:46:48 +01:00 committed by Arnaldo Carvalho de Melo
parent 9fecc77ed8
commit 8e1f8c904e
1 changed file with 38 additions and 2 deletions

View File

@@ -36,6 +36,7 @@ struct funcs_layout {
/* One ELF symbol-table function, used to validate/patch DWARF functions. */
struct elf_function {
	const char *name;	/* symbol name from the ELF string table */
	unsigned long addr;	/* symbol value; for kmods, section-relative until relocated */
	unsigned long size;	/* function size in bytes, from the ELF symbol */
	unsigned long sh_addr;	/* containing section's address, for kmod relocation */
	bool generated;		/* set once a BTF entry has been emitted for it */
};
@@ -98,6 +99,7 @@ static int collect_function(struct btf_elf *btfe, GElf_Sym *sym,
functions[functions_cnt].name = name;
functions[functions_cnt].addr = elf_sym__value(sym);
functions[functions_cnt].size = elf_sym__size(sym);
functions[functions_cnt].sh_addr = sh.sh_addr;
functions[functions_cnt].generated = false;
functions_cnt++;
@@ -236,6 +238,39 @@ get_kmod_addrs(struct btf_elf *btfe, __u64 **paddrs, __u64 *pcount)
return 0;
}
/*
 * Report whether any ftrace address in the sorted array @addrs lands
 * inside @func, i.e. within [func->addr, func->addr + func->size).
 *
 * Lower-bound binary search: locate the smallest address that is >= the
 * function start, then verify it also lies below the function end.
 */
static int is_ftrace_func(struct elf_function *func, __u64 *addrs, __u64 count)
{
	__u64 start = func->addr;
	__u64 end = func->addr + func->size;
	size_t lo, hi;

	/* No ftrace addresses at all: nothing can match. */
	if (count == 0)
		return false;

	lo = 0;
	hi = count - 1;
	/*
	 * Invariant on exit: addrs[hi] is the smallest element >= start,
	 * unless every element is < start — the final check covers that
	 * corner case as well.
	 */
	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (addrs[mid] < start)
			lo = mid + 1;	/* too small; the answer lies to the right */
		else
			hi = mid;	/* satisfies the invariant; tighten the bound */
	}

	return start <= addrs[hi] && addrs[hi] < end;
}
static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
{
__u64 *addrs, count, i;
@@ -283,10 +318,11 @@ static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
* functions[x]::addr is relative address within section
* and needs to be relocated by adding sh_addr.
*/
__u64 addr = kmod ? func->addr + func->sh_addr : func->addr;
if (kmod)
func->addr += func->sh_addr;
/* Make sure function is within ftrace addresses. */
if (bsearch(&addr, addrs, count, sizeof(addrs[0]), addrs_cmp)) {
if (is_ftrace_func(func, addrs, count)) {
/*
* We iterate over sorted array, so we can easily skip
* not valid item and move following valid field into