dwarves/strings.c


/*
  SPDX-License-Identifier: GPL-2.0-only

  Copyright (C) 2008 Arnaldo Carvalho de Melo <acme@redhat.com>
*/
#include "pahole_strings.h"
#include "gobuffer.h"
#include <search.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>
#include <bpf/libbpf.h>
#include "dutil.h"
struct strings *strings__new(void)
{
	struct strings *strs = malloc(sizeof(*strs));

	if (!strs)
		return NULL;

	strs->btf = btf__new_empty();
	if (libbpf_get_error(strs->btf)) {
		free(strs);
		return NULL;
	}

	return strs;
}
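
/* Free the container and its backing BTF object; a NULL @strs is a no-op. */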
void strings__delete(struct strings *strs)
{
	if (strs == NULL)
		return;
	btf__free(strs->btf);
	free(strs);
}
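
/*
 * Add @str to the string section, deduplicated against previously added
 * strings. Returns its offset within the section, or 0 if @str is NULL
 * or the addition failed; offset 0 always holds the empty string, so
 * callers can treat it as "no string".
 */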
strings_t strings__add(struct strings *strs, const char *str)
{
	strings_t index;

	if (str == NULL)
		return 0;

	index = btf__add_str(strs->btf, str);
	if (index < 0)
		return 0;

	return index;
}
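
/*
 * Look up @str without adding it. Returns its offset in the string
 * section if it was added before, or a negative value otherwise.
 */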
strings_t strings__find(struct strings *strs, const char *str)
{
	return btf__find_str(strs->btf, str);
}
/*
 * a horrible and inefficient hack to get the string section size out of BTF:
 * libbpf doesn't expose it directly, so read str_len from the raw BTF header
 */
strings_t strings__size(const struct strings *strs)
{
	const struct btf_header *p;
	uint32_t sz;

	p = btf__get_raw_data(strs->btf, &sz);
	if (!p)
		return -1;

	return p->str_len;
}
/* a similarly horrible hack to copy the string section out of BTF */
int strings__copy(const struct strings *strs, void *dst)
{
	const struct btf_header *p;
	uint32_t sz;

	p = btf__get_raw_data(strs->btf, &sz);
	if (!p)
		return -1;

	/* str_off is relative to the end of the BTF header, hence the hdr_len */
	memcpy(dst, (void *)p + p->hdr_len + p->str_off, p->str_len);
	return 0;
}
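
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * an encoder that needs the raw, deduplicated string section, such as
 * the CTF encoding path this API serves, would drive it roughly like
 * this (error handling elided; "counter" and emit_string_section() are
 * made up for the example):
 *
 *	struct strings *strs = strings__new();
 *	strings_t name_off = strings__add(strs, "counter");
 *	strings_t sec_size = strings__size(strs);
 *	void *sec = malloc(sec_size);
 *
 *	if (sec && strings__copy(strs, sec) == 0)
 *		emit_string_section(sec, sec_size);
 *	free(sec);
 *	strings__delete(strs);
 */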