Merge tag 'perf-core-for-mingo-20160718' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

 - Properly report when a function wildcard produces no matches in 'perf probe'
   (Masami Hiramatsu)

 - Balance opening and reading events in 'perf stat'; the imbalance could
   cause it to get stuck trying to close invalid file descriptors (Mark Rutland)

Infrastructure changes:

 - Copy more headers from the kernel, this time headers that were just
   including the contents of their kernel counterparts; this should help
   resolve the problems with linux-next, where some uapi-related patches
   seem to be breaking the tools/object/ build. (Arnaldo Carvalho de Melo)

   Some more combing will be done, but at least it is possible to build
   perf out of tree, via a detached tarball (make help | grep perf),
   without including kernel files in its MANIFEST (Arnaldo Carvalho de Melo)

 - Fix smatch-found errors that were not causing problems, but are
   mistakes nonetheless (Dan Carpenter)

 - Fix string vs. byte array resolving in the python script code (Jiri Olsa)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2016-07-19 08:44:38 +02:00
commit 5048c2af07
26 changed files with 512 additions and 57 deletions

@@ -1 +1,43 @@
#include "../../../../include/asm-generic/bitops/__fls.h"
#ifndef _ASM_GENERIC_BITOPS___FLS_H_
#define _ASM_GENERIC_BITOPS___FLS_H_
#include <asm/types.h>
/**
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __fls(unsigned long word)
{
int num = BITS_PER_LONG - 1;
#if BITS_PER_LONG == 64
if (!(word & (~0ul << 32))) {
num -= 32;
word <<= 32;
}
#endif
if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (BITS_PER_LONG-1))))
num -= 1;
return num;
}
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */

@@ -1 +1,25 @@
#include "../../../../include/asm-generic/bitops/arch_hweight.h"
#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
#include <asm/types.h>
static inline unsigned int __arch_hweight32(unsigned int w)
{
return __sw_hweight32(w);
}
static inline unsigned int __arch_hweight16(unsigned int w)
{
return __sw_hweight16(w);
}
static inline unsigned int __arch_hweight8(unsigned int w)
{
return __sw_hweight8(w);
}
static inline unsigned long __arch_hweight64(__u64 w)
{
return __sw_hweight64(w);
}
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */

@@ -1 +1,43 @@
#include "../../../../include/asm-generic/bitops/const_hweight.h"
#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
/*
* Compile time versions of __arch_hweightN()
*/
#define __const_hweight8(w) \
((unsigned int) \
((!!((w) & (1ULL << 0))) + \
(!!((w) & (1ULL << 1))) + \
(!!((w) & (1ULL << 2))) + \
(!!((w) & (1ULL << 3))) + \
(!!((w) & (1ULL << 4))) + \
(!!((w) & (1ULL << 5))) + \
(!!((w) & (1ULL << 6))) + \
(!!((w) & (1ULL << 7)))))
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
/*
* Generic interface.
*/
#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w))
#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
/*
* Interface for known constant arguments
*/
#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
/*
* Type invariant interface to the compile time constant hweight functions.
*/
#define HWEIGHT(w) HWEIGHT64((u64)w)
#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
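
A side note on the header copied above: the __const_hweight*() macros expand
to a constant expression, which is what lets a population count appear where
the compiler demands one (array sizes, case labels, and so on). A minimal
sketch of that use, assuming the header is reachable on the include path; the
mask and array name below are made up for illustration:

#include <asm-generic/bitops/const_hweight.h>

#define MY_MASK 0x0000f00fu        /* hypothetical mask: 8 bits set */

/* __const_hweight32() folds to the constant 8 at compile time, so it can
 * size an array; hweight32() picks this same path for constant input. */
static int slot[__const_hweight32(MY_MASK)];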

@@ -1 +1,41 @@
#include "../../../../include/asm-generic/bitops/fls.h"
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __always_inline int fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
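
A classic use of the fls() above is rounding a value up to the next power of
two, since fls(x - 1) is the number of bits needed to represent x - 1. A small
sketch under the assumption that this fls() is in scope; the helper name is
invented here and is not part of the patch:

static inline unsigned int round_up_pow2_u32(unsigned int x)
{
        /* fls(4) == 3, so round_up_pow2_u32(5) == 1u << 3 == 8; values that
         * are already powers of two map to themselves. Assumes x <= 1u << 31. */
        return x <= 1 ? 1 : 1u << fls(x - 1);
}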

@@ -1 +1,36 @@
#include "../../../../include/asm-generic/bitops/fls64.h"
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_
#include <asm/types.h>
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
if (x == 0)
return 0;
return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */

@@ -1,5 +1,104 @@
#include "../../../include/linux/hash.h"
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
(C) 2002 Nadia Yvette Chambers, IBM */
#ifndef _TOOLS_LINUX_HASH_H
#define _TOOLS_LINUX_HASH_H
#include <asm/types.h>
#include <linux/compiler.h>
/*
* The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/brtfs_inode.h and
* fs/inode.c. It's not actually prime any more (the previous primes
* were actively bad for hashing), but the name remains.
*/
#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif
/*
* This hash multiplies the input by a large odd number and takes the
* high bits. Since multiplication propagates changes to the most
* significant end only, it is essential that the high bits of the
* product be used for the hash value.
*
* Chuck Lever verified the effectiveness of this technique:
* http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
*
* Although a random odd number will do, it turns out that the golden
* ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
* properties. (See Knuth vol 3, section 6.4, exercise 9.)
*
* These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
* which is very slightly easier to multiply by and makes no
* difference to the hash distribution.
*/
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
#ifdef CONFIG_HAVE_ARCH_HASH
/* This header may use the GOLDEN_RATIO_xx constants */
#include <asm/hash.h>
#endif
/*
* The _generic versions exist only so lib/test_hash.c can compare
* the arch-optimized versions with the generic.
*
* Note that if you change these, any <asm/hash.h> that aren't updated
* to match need to have their HAVE_ARCH_* define values updated so the
* self-test will not false-positive.
*/
#ifndef HAVE_ARCH__HASH_32
#define __hash_32 __hash_32_generic
#endif
static inline u32 __hash_32_generic(u32 val)
{
return val * GOLDEN_RATIO_32;
}
#ifndef HAVE_ARCH_HASH_32
#define hash_32 hash_32_generic
#endif
static inline u32 hash_32_generic(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
}
#ifndef HAVE_ARCH_HASH_64
#define hash_64 hash_64_generic
#endif
static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
{
#if BITS_PER_LONG == 64
/* 64x64-bit multiply is efficient on all 64-bit processors */
return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
/* Hash 64 bits using only 32x32-bit multiply. */
return hash_32((u32)val ^ __hash_32(val >> 32), bits);
#endif
}
static inline u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_long((unsigned long)ptr, bits);
}
/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;
#if BITS_PER_LONG == 64
val ^= (val >> 32);
#endif
return (u32)val;
}
#endif /* _LINUX_HASH_H */
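
For context, most callers only ever see hash_ptr()/hash_long(): multiply by
the golden-ratio constant and keep the top 'bits' bits as the bucket index.
A short usage sketch, assuming the header above is included as <linux/hash.h>
from the tools include path; the table size and function name are illustrative
only:

#include <linux/hash.h>

#define TABLE_BITS 6        /* hypothetical table: 1 << 6 == 64 buckets */

static unsigned int bucket_for(const void *ptr)
{
        /* hash_ptr() feeds the pointer through hash_long(), i.e. a
         * GOLDEN_RATIO multiply, and returns its top TABLE_BITS bits. */
        return hash_ptr(ptr, TABLE_BITS);
}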

@@ -1 +1,90 @@
#include "../../../include/linux/poison.h"
#ifndef _LINUX_POISON_H
#define _LINUX_POISON_H
/********** include/linux/list.h **********/
/*
* Architectures might want to move the poison pointer offset
* into some well-recognized area such as 0xdead000000000000,
* that is also not mappable by user-space exploits:
*/
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
* Magic number "tsta" to indicate a static timer initializer
* for the object debugging code.
*/
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/debug-pagealloc.c **********/
#ifdef CONFIG_PAGE_POISONING_ZERO
#define PAGE_POISON 0x00
#else
#define PAGE_POISON 0xaa
#endif
/********** mm/page_alloc.c ************/
#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA)
/********** mm/slab.c **********/
/*
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
/* ...and for poisoning */
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */
/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc
/********** arch/ia64/hp/common/sba_iommu.c **********/
/*
* arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
* value of "SBAIOMMU POISON\0" for spill-over poisoning.
*/
/********** fs/jbd/journal.c **********/
#define JBD_POISON_FREE 0x5b
#define JBD2_POISON_FREE 0x5c
/********** drivers/base/dmapool.c **********/
#define POOL_POISON_FREED 0xa7 /* !inuse */
#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
/********** drivers/atm/ **********/
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
/********** lib/flex_array.c **********/
#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
/********** security/ **********/
#define KEY_DESTROY 0xbd
#endif
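
For reference, LIST_POISON1/LIST_POISON2 are what the kernel's list_del()
stores into an entry after unlinking it, so a stale dereference faults at a
recognizable non-NULL address. A sketch of that idiom; the struct and helper
below are declared only to keep the example self-contained:

struct list_head { struct list_head *next, *prev; };

static inline void poison_unlinked(struct list_head *entry)
{
        /* Mirrors what list_del() does once an entry is off the list. */
        entry->next = (struct list_head *)LIST_POISON1;
        entry->prev = (struct list_head *)LIST_POISON2;
}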

@@ -77,17 +77,4 @@ tools/include/linux/stringify.h
tools/include/linux/types.h
tools/include/linux/err.h
tools/include/linux/bitmap.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/fls64.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/linux/list.h
include/linux/hash.h
include/linux/swab.h
arch/*/include/asm/unistd*.h
arch/*/include/uapi/asm/unistd*.h
tools/arch/*/include/uapi/asm/perf_regs.h
include/linux/poison.h
include/uapi/linux/const.h
include/uapi/linux/swab.h

@@ -351,6 +351,9 @@ $(PERF_IN): prepare FORCE
@(test -f ../../include/uapi/linux/perf_event.h && ( \
(diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
|| echo "Warning: tools/include/uapi/linux/perf_event.h differs from kernel" >&2 )) || true
@(test -f ../../include/linux/hash.h && ( \
(diff -B ../include/linux/hash.h ../../include/linux/hash.h >/dev/null) \
|| echo "Warning: tools/include/linux/hash.h differs from kernel" >&2 )) || true
@(test -f ../../include/uapi/linux/hw_breakpoint.h && ( \
(diff -B ../include/uapi/linux/hw_breakpoint.h ../../include/uapi/linux/hw_breakpoint.h >/dev/null) \
|| echo "Warning: tools/include/uapi/linux/hw_breakpoint.h differs from kernel" >&2 )) || true
@@ -411,6 +414,21 @@ $(PERF_IN): prepare FORCE
@(test -f ../../arch/arm64/include/uapi/asm/kvm.h && ( \
(diff -B ../arch/arm64/include/uapi/asm/kvm.h ../../arch/arm64/include/uapi/asm/kvm.h >/dev/null) \
|| echo "Warning: tools/arch/arm64/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
@(test -f ../../include/asm-generic/bitops/arch_hweight.h && ( \
(diff -B ../include/asm-generic/bitops/arch_hweight.h ../../include/asm-generic/bitops/arch_hweight.h >/dev/null) \
|| echo "Warning: tools/include/asm-generic/bitops/arch_hweight.h differs from kernel" >&2 )) || true
@(test -f ../../include/asm-generic/bitops/const_hweight.h && ( \
(diff -B ../include/asm-generic/bitops/const_hweight.h ../../include/asm-generic/bitops/const_hweight.h >/dev/null) \
|| echo "Warning: tools/include/asm-generic/bitops/const_hweight.h differs from kernel" >&2 )) || true
@(test -f ../../include/asm-generic/bitops/__fls.h && ( \
(diff -B ../include/asm-generic/bitops/__fls.h ../../include/asm-generic/bitops/__fls.h >/dev/null) \
|| echo "Warning: tools/include/asm-generic/bitops/__fls.h differs from kernel" >&2 )) || true
@(test -f ../../include/asm-generic/bitops/fls.h && ( \
(diff -B ../include/asm-generic/bitops/fls.h ../../include/asm-generic/bitops/fls.h >/dev/null) \
|| echo "Warning: tools/include/asm-generic/bitops/fls.h differs from kernel" >&2 )) || true
@(test -f ../../include/asm-generic/bitops/fls64.h && ( \
(diff -B ../include/asm-generic/bitops/fls64.h ../../include/asm-generic/bitops/fls64.h >/dev/null) \
|| echo "Warning: tools/include/asm-generic/bitops/fls64.h differs from kernel" >&2 )) || true
$(Q)$(MAKE) $(build)=perf
$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)

@@ -290,8 +290,12 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
static int read_counter(struct perf_evsel *counter)
{
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;
int ncpus, cpu, thread;
if (target__has_cpu(&target))
ncpus = perf_evsel__nr_cpus(counter);
else
ncpus = 1;
if (!counter->supported)
return -ENOENT;
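
The reason ncpus collapses to 1 here: perf's per-event file descriptors are
laid out per (cpu, thread) pair, and in a per-thread session (no CPU target)
only a single "cpu" slot is ever opened, so the read side has to walk the same
shape the open side did. A rough sketch of that layout, not perf's actual
helpers; the function name and the fixed array bound are made up:

#include <stdint.h>
#include <unistd.h>

#define MAX_THREADS 64        /* hypothetical bound */

/* fd[cpu][thread] holds the perf_event_open() fds; with ncpus == 1 only
 * row 0 contains valid descriptors, matching how they were opened. */
static int read_all_counts(int ncpus, int nthreads, int fd[][MAX_THREADS])
{
        for (int cpu = 0; cpu < ncpus; cpu++) {
                for (int thread = 0; thread < nthreads; thread++) {
                        uint64_t count;

                        if (read(fd[cpu][thread], &count, sizeof(count)) != (ssize_t)sizeof(count))
                                return -1;
                }
        }
        return 0;
}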

@@ -59,7 +59,6 @@ static int get_e_machine(struct jitheader *hdr)
ssize_t sret;
char id[16];
int fd, ret = -1;
int m = -1;
struct {
uint16_t e_type;
uint16_t e_machine;
@@ -81,11 +80,7 @@ static int get_e_machine(struct jitheader *hdr)
if (sret != sizeof(info))
goto error;
m = info.e_machine;
if (m < 0)
m = 0; /* ELF EM_NONE */
hdr->elf_mach = m;
hdr->elf_mach = info.e_machine;
ret = 0;
error:
close(fd);
@@ -491,10 +486,11 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
if (sret != 1)
goto error;
}
if (padding_count)
if (padding_count) {
sret = fwrite_unlocked(pad_bytes, padding_count, 1, fp);
if (sret != 1)
goto error;
}
funlockfile(fp);
return 0;

@@ -5,6 +5,7 @@
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <asm/barrier.h>

@@ -40,6 +40,7 @@ perf-y += event_update.o
perf-y += event-times.o
perf-y += backward-ring-buffer.o
perf-y += sdt.o
perf-y += is_printable_array.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)

@@ -221,6 +221,10 @@ static struct test generic_tests[] = {
.desc = "Test SDT event probing",
.func = test__sdt_event,
},
{
.desc = "Test is_printable_array function",
.func = test__is_printable_array,
},
{
.func = NULL,
},

@@ -0,0 +1,36 @@
#include <linux/compiler.h>
#include "tests.h"
#include "debug.h"
#include "util.h"
int test__is_printable_array(int subtest __maybe_unused)
{
char buf1[] = { 'k', 'r', 4, 'v', 'a', 0 };
char buf2[] = { 'k', 'r', 'a', 'v', 4, 0 };
struct {
char *buf;
unsigned int len;
int ret;
} t[] = {
{ (char *) "krava", sizeof("krava"), 1 },
{ (char *) "krava", sizeof("krava") - 1, 0 },
{ (char *) "", sizeof(""), 1 },
{ (char *) "", 0, 0 },
{ NULL, 0, 0 },
{ buf1, sizeof(buf1), 0 },
{ buf2, sizeof(buf2), 0 },
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(t); i++) {
int ret;
ret = is_printable_array((char *) t[i].buf, t[i].len);
if (ret != t[i].ret) {
pr_err("failed: test %u\n", i);
return TEST_FAIL;
}
}
return TEST_OK;
}

@@ -89,6 +89,7 @@ int test__event_times(int subtest);
int test__backward_ring_buffer(int subtest);
int test__cpu_map_print(int subtest);
int test__sdt_event(int subtest);
int test__is_printable_array(int subtest);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT

@@ -588,15 +588,25 @@ int cpu__setup_cpunode_map(void)
}
bool cpu_map__has(struct cpu_map *cpus, int cpu)
{
return cpu_map__idx(cpus, cpu) != -1;
}
int cpu_map__idx(struct cpu_map *cpus, int cpu)
{
int i;
for (i = 0; i < cpus->nr; ++i) {
if (cpus->map[i] == cpu)
return true;
return i;
}
return false;
return -1;
}
int cpu_map__cpu(struct cpu_map *cpus, int idx)
{
return cpus->map[idx];
}
size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)

@@ -68,5 +68,7 @@ int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
int (*f)(struct cpu_map *map, int cpu, void *data),
void *data);
int cpu_map__cpu(struct cpu_map *cpus, int idx);
bool cpu_map__has(struct cpu_map *cpus, int cpu);
int cpu_map__idx(struct cpu_map *cpus, int cpu);
#endif /* __PERF_CPUMAP_H */

@@ -1,2 +0,0 @@
#include <asm/types.h>
#include "../../../../include/uapi/linux/swab.h"

@@ -1 +0,0 @@
#include "../../../../include/uapi/linux/const.h"

@@ -312,6 +312,9 @@ int map__load(struct map *map, symbol_filter_t filter)
pr_warning("%.*s was updated (is prelink enabled?). "
"Restart the long running apps that use it!\n",
(int)real_len, name);
} else if (filter) {
pr_warning("no symbols passed the given filter.\n");
return -2; /* Empty but maybe by the filter */
} else {
pr_warning("no symbols found in %s, maybe install "
"a debug package?\n", name);

@@ -3312,8 +3312,16 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
/* Load symbols with given filter */
available_func_filter = _filter;
if (map__load(map, filter_available_functions)) {
pr_err("Failed to load symbols in %s\n", (target) ? : "kernel");
ret = map__load(map, filter_available_functions);
if (ret) {
if (ret == -2) {
char *str = strfilter__string(_filter);
pr_err("Failed to find symbols matched to \"%s\"\n",
str);
free(str);
} else
pr_err("Failed to load symbols in %s\n",
(target) ? : "kernel");
goto end;
}
if (!dso__sorted_by_name(map->dso, map->type))

@@ -295,18 +295,6 @@ static bool is_tracepoint(struct pyrf_event *pevent)
return pevent->evsel->attr.type == PERF_TYPE_TRACEPOINT;
}
static int is_printable_array(char *p, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i++) {
if (!isprint(p[i]) && !isspace(p[i]))
return 0;
}
return 1;
}
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct format_field *field)
{

@@ -386,7 +386,6 @@ exit:
return pylist;
}
static void python_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al)
@@ -457,14 +456,26 @@ static void python_process_tracepoint(struct perf_sample *sample,
pydict_set_item_string_decref(dict, "common_callchain", callchain);
}
for (field = event->format.fields; field; field = field->next) {
if (field->flags & FIELD_IS_STRING) {
int offset;
unsigned int offset, len;
unsigned long long val;
if (field->flags & FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & FIELD_IS_DYNAMIC) {
offset = *(int *)(data + field->offset);
val = pevent_read_number(scripting_context->pevent,
data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
} else
offset = field->offset;
obj = PyString_FromString((char *)data + offset);
}
if (field->flags & FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
obj = PyString_FromString((char *) data + offset);
} else {
obj = PyByteArray_FromStringAndSize((const char *) data + offset, len);
field->flags &= ~FIELD_IS_STRING;
}
} else { /* FIELD_IS_NUMERIC */
obj = get_field_numeric_entry(event, field, data);
}
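
For the dynamic-array branch above, the 32-bit value stored at field->offset
packs both the payload location and its length (these are the "__data_loc"
fields in the tracepoint format). A standalone sketch of that decoding; the
helper name is invented for illustration:

#include <stdint.h>

/* A __data_loc slot holds "len << 16 | offset"; the payload itself lives
 * at data + offset in the raw sample. */
static void decode_data_loc(uint32_t packed, unsigned int *offset,
                            unsigned int *len)
{
        *len = packed >> 16;
        *offset = packed & 0xffff;
}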

@@ -746,3 +746,19 @@ void print_binary(unsigned char *data, size_t len,
}
printer(BINARY_PRINT_DATA_END, -1, extra);
}
int is_printable_array(char *p, unsigned int len)
{
unsigned int i;
if (!p || !len || p[len - 1] != 0)
return 0;
len--;
for (i = 0; i < len; i++) {
if (!isprint(p[i]) && !isspace(p[i]))
return 0;
}
return 1;
}

@@ -364,4 +364,5 @@ void print_binary(unsigned char *data, size_t len,
extern int sched_getcpu(void);
#endif
int is_printable_array(char *p, unsigned int len);
#endif /* GIT_COMPAT_UTIL_H */