Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

* Convert the trace builtins to use the growing evsel/evlist tracepoint
  infrastructure, removing several open coded constructs like switch-like
  series of strcmp calls to dispatch events, etc. Basically what had already
  been showcased in 'perf sched'.

* Add an evsel constructor for tracepoints that uses libtraceevent just to
  parse the /format events file; use it in a new 'perf test' so that
  libtraceevent format parsing regressions can be caught more readily.

* Some strange errors were happening in some builds, but not on the next,
  reported by several people. The problem was that some parser related
  files, generated during the build, didn't have proper make deps. Fix from
  Eric Sandeen.

* Fix some compile errors on 32-bit, from Feng Tang.

* Don't use the sscanf extension %as, which is not available on bionic;
  reimplementation by Irina Tirdea.

* Fix bfd.h/libbfd detection with recent binutils, from Markus Trippelsdorf.

* Introduce a struct and cache information about the environment where a
  perf.data file was captured, from Namhyung Kim.

* Fix several error paths in libtraceevent, from Namhyung Kim.

* Print the event causing perf_event_open() to fail in 'perf record', from
  Stephane Eranian.

* New 'kvm' analysis tool, from Xiao Guangrong.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit f74eb72868
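
On the bionic/%as item in the pull description: %as is a glibc-only sscanf
conversion that allocates the output string. One portable alternative -- a
sketch of the general idea, not Irina Tirdea's actual patch -- bounds the
read into a local buffer and duplicates the result:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Portable stand-in for: sscanf(line, "%as", &word) */
    static char *first_word(const char *line)
    {
            char buf[256];

            if (sscanf(line, "%255s", buf) != 1)    /* bounded, works on bionic */
                    return NULL;

            return strdup(buf);                     /* caller frees */
    }
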
@ -9,6 +9,22 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
#define DE_VECTOR 0
|
||||
#define DB_VECTOR 1
|
||||
#define BP_VECTOR 3
|
||||
#define OF_VECTOR 4
|
||||
#define BR_VECTOR 5
|
||||
#define UD_VECTOR 6
|
||||
#define NM_VECTOR 7
|
||||
#define DF_VECTOR 8
|
||||
#define TS_VECTOR 10
|
||||
#define NP_VECTOR 11
|
||||
#define SS_VECTOR 12
|
||||
#define GP_VECTOR 13
|
||||
#define PF_VECTOR 14
|
||||
#define MF_VECTOR 16
|
||||
#define MC_VECTOR 18
|
||||
|
||||
/* Select x86 specific features in <linux/kvm.h> */
|
||||
#define __KVM_HAVE_PIT
|
||||
#define __KVM_HAVE_IOAPIC
|
||||
|
|
|
@ -75,22 +75,6 @@
|
|||
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
|
||||
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
|
||||
|
||||
#define DE_VECTOR 0
|
||||
#define DB_VECTOR 1
|
||||
#define BP_VECTOR 3
|
||||
#define OF_VECTOR 4
|
||||
#define BR_VECTOR 5
|
||||
#define UD_VECTOR 6
|
||||
#define NM_VECTOR 7
|
||||
#define DF_VECTOR 8
|
||||
#define TS_VECTOR 10
|
||||
#define NP_VECTOR 11
|
||||
#define SS_VECTOR 12
|
||||
#define GP_VECTOR 13
|
||||
#define PF_VECTOR 14
|
||||
#define MF_VECTOR 16
|
||||
#define MC_VECTOR 18
|
||||
|
||||
#define SELECTOR_TI_MASK (1 << 2)
|
||||
#define SELECTOR_RPL_MASK 0x03
|
||||
|
||||
|
|
|
@ -1,6 +1,135 @@
|
|||
#ifndef __SVM_H
|
||||
#define __SVM_H
|
||||
|
||||
#define SVM_EXIT_READ_CR0 0x000
|
||||
#define SVM_EXIT_READ_CR3 0x003
|
||||
#define SVM_EXIT_READ_CR4 0x004
|
||||
#define SVM_EXIT_READ_CR8 0x008
|
||||
#define SVM_EXIT_WRITE_CR0 0x010
|
||||
#define SVM_EXIT_WRITE_CR3 0x013
|
||||
#define SVM_EXIT_WRITE_CR4 0x014
|
||||
#define SVM_EXIT_WRITE_CR8 0x018
|
||||
#define SVM_EXIT_READ_DR0 0x020
|
||||
#define SVM_EXIT_READ_DR1 0x021
|
||||
#define SVM_EXIT_READ_DR2 0x022
|
||||
#define SVM_EXIT_READ_DR3 0x023
|
||||
#define SVM_EXIT_READ_DR4 0x024
|
||||
#define SVM_EXIT_READ_DR5 0x025
|
||||
#define SVM_EXIT_READ_DR6 0x026
|
||||
#define SVM_EXIT_READ_DR7 0x027
|
||||
#define SVM_EXIT_WRITE_DR0 0x030
|
||||
#define SVM_EXIT_WRITE_DR1 0x031
|
||||
#define SVM_EXIT_WRITE_DR2 0x032
|
||||
#define SVM_EXIT_WRITE_DR3 0x033
|
||||
#define SVM_EXIT_WRITE_DR4 0x034
|
||||
#define SVM_EXIT_WRITE_DR5 0x035
|
||||
#define SVM_EXIT_WRITE_DR6 0x036
|
||||
#define SVM_EXIT_WRITE_DR7 0x037
|
||||
#define SVM_EXIT_EXCP_BASE 0x040
|
||||
#define SVM_EXIT_INTR 0x060
|
||||
#define SVM_EXIT_NMI 0x061
|
||||
#define SVM_EXIT_SMI 0x062
|
||||
#define SVM_EXIT_INIT 0x063
|
||||
#define SVM_EXIT_VINTR 0x064
|
||||
#define SVM_EXIT_CR0_SEL_WRITE 0x065
|
||||
#define SVM_EXIT_IDTR_READ 0x066
|
||||
#define SVM_EXIT_GDTR_READ 0x067
|
||||
#define SVM_EXIT_LDTR_READ 0x068
|
||||
#define SVM_EXIT_TR_READ 0x069
|
||||
#define SVM_EXIT_IDTR_WRITE 0x06a
|
||||
#define SVM_EXIT_GDTR_WRITE 0x06b
|
||||
#define SVM_EXIT_LDTR_WRITE 0x06c
|
||||
#define SVM_EXIT_TR_WRITE 0x06d
|
||||
#define SVM_EXIT_RDTSC 0x06e
|
||||
#define SVM_EXIT_RDPMC 0x06f
|
||||
#define SVM_EXIT_PUSHF 0x070
|
||||
#define SVM_EXIT_POPF 0x071
|
||||
#define SVM_EXIT_CPUID 0x072
|
||||
#define SVM_EXIT_RSM 0x073
|
||||
#define SVM_EXIT_IRET 0x074
|
||||
#define SVM_EXIT_SWINT 0x075
|
||||
#define SVM_EXIT_INVD 0x076
|
||||
#define SVM_EXIT_PAUSE 0x077
|
||||
#define SVM_EXIT_HLT 0x078
|
||||
#define SVM_EXIT_INVLPG 0x079
|
||||
#define SVM_EXIT_INVLPGA 0x07a
|
||||
#define SVM_EXIT_IOIO 0x07b
|
||||
#define SVM_EXIT_MSR 0x07c
|
||||
#define SVM_EXIT_TASK_SWITCH 0x07d
|
||||
#define SVM_EXIT_FERR_FREEZE 0x07e
|
||||
#define SVM_EXIT_SHUTDOWN 0x07f
|
||||
#define SVM_EXIT_VMRUN 0x080
|
||||
#define SVM_EXIT_VMMCALL 0x081
|
||||
#define SVM_EXIT_VMLOAD 0x082
|
||||
#define SVM_EXIT_VMSAVE 0x083
|
||||
#define SVM_EXIT_STGI 0x084
|
||||
#define SVM_EXIT_CLGI 0x085
|
||||
#define SVM_EXIT_SKINIT 0x086
|
||||
#define SVM_EXIT_RDTSCP 0x087
|
||||
#define SVM_EXIT_ICEBP 0x088
|
||||
#define SVM_EXIT_WBINVD 0x089
|
||||
#define SVM_EXIT_MONITOR 0x08a
|
||||
#define SVM_EXIT_MWAIT 0x08b
|
||||
#define SVM_EXIT_MWAIT_COND 0x08c
|
||||
#define SVM_EXIT_XSETBV 0x08d
|
||||
#define SVM_EXIT_NPF 0x400
|
||||
|
||||
#define SVM_EXIT_ERR -1
|
||||
|
||||
#define SVM_EXIT_REASONS \
|
||||
{ SVM_EXIT_READ_CR0, "read_cr0" }, \
|
||||
{ SVM_EXIT_READ_CR3, "read_cr3" }, \
|
||||
{ SVM_EXIT_READ_CR4, "read_cr4" }, \
|
||||
{ SVM_EXIT_READ_CR8, "read_cr8" }, \
|
||||
{ SVM_EXIT_WRITE_CR0, "write_cr0" }, \
|
||||
{ SVM_EXIT_WRITE_CR3, "write_cr3" }, \
|
||||
{ SVM_EXIT_WRITE_CR4, "write_cr4" }, \
|
||||
{ SVM_EXIT_WRITE_CR8, "write_cr8" }, \
|
||||
{ SVM_EXIT_READ_DR0, "read_dr0" }, \
|
||||
{ SVM_EXIT_READ_DR1, "read_dr1" }, \
|
||||
{ SVM_EXIT_READ_DR2, "read_dr2" }, \
|
||||
{ SVM_EXIT_READ_DR3, "read_dr3" }, \
|
||||
{ SVM_EXIT_WRITE_DR0, "write_dr0" }, \
|
||||
{ SVM_EXIT_WRITE_DR1, "write_dr1" }, \
|
||||
{ SVM_EXIT_WRITE_DR2, "write_dr2" }, \
|
||||
{ SVM_EXIT_WRITE_DR3, "write_dr3" }, \
|
||||
{ SVM_EXIT_WRITE_DR5, "write_dr5" }, \
|
||||
{ SVM_EXIT_WRITE_DR7, "write_dr7" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
|
||||
{ SVM_EXIT_INTR, "interrupt" }, \
|
||||
{ SVM_EXIT_NMI, "nmi" }, \
|
||||
{ SVM_EXIT_SMI, "smi" }, \
|
||||
{ SVM_EXIT_INIT, "init" }, \
|
||||
{ SVM_EXIT_VINTR, "vintr" }, \
|
||||
{ SVM_EXIT_CPUID, "cpuid" }, \
|
||||
{ SVM_EXIT_INVD, "invd" }, \
|
||||
{ SVM_EXIT_HLT, "hlt" }, \
|
||||
{ SVM_EXIT_INVLPG, "invlpg" }, \
|
||||
{ SVM_EXIT_INVLPGA, "invlpga" }, \
|
||||
{ SVM_EXIT_IOIO, "io" }, \
|
||||
{ SVM_EXIT_MSR, "msr" }, \
|
||||
{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
|
||||
{ SVM_EXIT_SHUTDOWN, "shutdown" }, \
|
||||
{ SVM_EXIT_VMRUN, "vmrun" }, \
|
||||
{ SVM_EXIT_VMMCALL, "hypercall" }, \
|
||||
{ SVM_EXIT_VMLOAD, "vmload" }, \
|
||||
{ SVM_EXIT_VMSAVE, "vmsave" }, \
|
||||
{ SVM_EXIT_STGI, "stgi" }, \
|
||||
{ SVM_EXIT_CLGI, "clgi" }, \
|
||||
{ SVM_EXIT_SKINIT, "skinit" }, \
|
||||
{ SVM_EXIT_WBINVD, "wbinvd" }, \
|
||||
{ SVM_EXIT_MONITOR, "monitor" }, \
|
||||
{ SVM_EXIT_MWAIT, "mwait" }, \
|
||||
{ SVM_EXIT_XSETBV, "xsetbv" }, \
|
||||
{ SVM_EXIT_NPF, "npf" }
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
enum {
|
||||
INTERCEPT_INTR,
|
||||
INTERCEPT_NMI,
|
||||
|
@ -264,81 +393,6 @@ struct __attribute__ ((__packed__)) vmcb {
|
|||
|
||||
#define SVM_EXITINFO_REG_MASK 0x0F
|
||||
|
||||
#define SVM_EXIT_READ_CR0 0x000
|
||||
#define SVM_EXIT_READ_CR3 0x003
|
||||
#define SVM_EXIT_READ_CR4 0x004
|
||||
#define SVM_EXIT_READ_CR8 0x008
|
||||
#define SVM_EXIT_WRITE_CR0 0x010
|
||||
#define SVM_EXIT_WRITE_CR3 0x013
|
||||
#define SVM_EXIT_WRITE_CR4 0x014
|
||||
#define SVM_EXIT_WRITE_CR8 0x018
|
||||
#define SVM_EXIT_READ_DR0 0x020
|
||||
#define SVM_EXIT_READ_DR1 0x021
|
||||
#define SVM_EXIT_READ_DR2 0x022
|
||||
#define SVM_EXIT_READ_DR3 0x023
|
||||
#define SVM_EXIT_READ_DR4 0x024
|
||||
#define SVM_EXIT_READ_DR5 0x025
|
||||
#define SVM_EXIT_READ_DR6 0x026
|
||||
#define SVM_EXIT_READ_DR7 0x027
|
||||
#define SVM_EXIT_WRITE_DR0 0x030
|
||||
#define SVM_EXIT_WRITE_DR1 0x031
|
||||
#define SVM_EXIT_WRITE_DR2 0x032
|
||||
#define SVM_EXIT_WRITE_DR3 0x033
|
||||
#define SVM_EXIT_WRITE_DR4 0x034
|
||||
#define SVM_EXIT_WRITE_DR5 0x035
|
||||
#define SVM_EXIT_WRITE_DR6 0x036
|
||||
#define SVM_EXIT_WRITE_DR7 0x037
|
||||
#define SVM_EXIT_EXCP_BASE 0x040
|
||||
#define SVM_EXIT_INTR 0x060
|
||||
#define SVM_EXIT_NMI 0x061
|
||||
#define SVM_EXIT_SMI 0x062
|
||||
#define SVM_EXIT_INIT 0x063
|
||||
#define SVM_EXIT_VINTR 0x064
|
||||
#define SVM_EXIT_CR0_SEL_WRITE 0x065
|
||||
#define SVM_EXIT_IDTR_READ 0x066
|
||||
#define SVM_EXIT_GDTR_READ 0x067
|
||||
#define SVM_EXIT_LDTR_READ 0x068
|
||||
#define SVM_EXIT_TR_READ 0x069
|
||||
#define SVM_EXIT_IDTR_WRITE 0x06a
|
||||
#define SVM_EXIT_GDTR_WRITE 0x06b
|
||||
#define SVM_EXIT_LDTR_WRITE 0x06c
|
||||
#define SVM_EXIT_TR_WRITE 0x06d
|
||||
#define SVM_EXIT_RDTSC 0x06e
|
||||
#define SVM_EXIT_RDPMC 0x06f
|
||||
#define SVM_EXIT_PUSHF 0x070
|
||||
#define SVM_EXIT_POPF 0x071
|
||||
#define SVM_EXIT_CPUID 0x072
|
||||
#define SVM_EXIT_RSM 0x073
|
||||
#define SVM_EXIT_IRET 0x074
|
||||
#define SVM_EXIT_SWINT 0x075
|
||||
#define SVM_EXIT_INVD 0x076
|
||||
#define SVM_EXIT_PAUSE 0x077
|
||||
#define SVM_EXIT_HLT 0x078
|
||||
#define SVM_EXIT_INVLPG 0x079
|
||||
#define SVM_EXIT_INVLPGA 0x07a
|
||||
#define SVM_EXIT_IOIO 0x07b
|
||||
#define SVM_EXIT_MSR 0x07c
|
||||
#define SVM_EXIT_TASK_SWITCH 0x07d
|
||||
#define SVM_EXIT_FERR_FREEZE 0x07e
|
||||
#define SVM_EXIT_SHUTDOWN 0x07f
|
||||
#define SVM_EXIT_VMRUN 0x080
|
||||
#define SVM_EXIT_VMMCALL 0x081
|
||||
#define SVM_EXIT_VMLOAD 0x082
|
||||
#define SVM_EXIT_VMSAVE 0x083
|
||||
#define SVM_EXIT_STGI 0x084
|
||||
#define SVM_EXIT_CLGI 0x085
|
||||
#define SVM_EXIT_SKINIT 0x086
|
||||
#define SVM_EXIT_RDTSCP 0x087
|
||||
#define SVM_EXIT_ICEBP 0x088
|
||||
#define SVM_EXIT_WBINVD 0x089
|
||||
#define SVM_EXIT_MONITOR 0x08a
|
||||
#define SVM_EXIT_MWAIT 0x08b
|
||||
#define SVM_EXIT_MWAIT_COND 0x08c
|
||||
#define SVM_EXIT_XSETBV 0x08d
|
||||
#define SVM_EXIT_NPF 0x400
|
||||
|
||||
#define SVM_EXIT_ERR -1
|
||||
|
||||
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
|
||||
|
||||
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
|
||||
|
@ -350,3 +404,4 @@ struct __attribute__ ((__packed__)) vmcb {
|
|||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
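
Moving the exit codes and the SVM_EXIT_REASONS / VMX_EXIT_REASONS string
tables above the #ifdef __KERNEL__ guard in these headers is what lets a
userspace tool include them directly; builtin-kvm.c later in this merge
consumes them like this (same shape as the code further down):

    struct exit_reasons_table {
            unsigned long exit_code;
            const char *reason;
    };

    static struct exit_reasons_table svm_exit_reasons[] = {
            SVM_EXIT_REASONS    /* expands to { exit_code, "name" } pairs */
    };
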
@ -25,6 +25,88 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
|
||||
|
||||
#define EXIT_REASON_EXCEPTION_NMI 0
|
||||
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
|
||||
#define EXIT_REASON_TRIPLE_FAULT 2
|
||||
|
||||
#define EXIT_REASON_PENDING_INTERRUPT 7
|
||||
#define EXIT_REASON_NMI_WINDOW 8
|
||||
#define EXIT_REASON_TASK_SWITCH 9
|
||||
#define EXIT_REASON_CPUID 10
|
||||
#define EXIT_REASON_HLT 12
|
||||
#define EXIT_REASON_INVD 13
|
||||
#define EXIT_REASON_INVLPG 14
|
||||
#define EXIT_REASON_RDPMC 15
|
||||
#define EXIT_REASON_RDTSC 16
|
||||
#define EXIT_REASON_VMCALL 18
|
||||
#define EXIT_REASON_VMCLEAR 19
|
||||
#define EXIT_REASON_VMLAUNCH 20
|
||||
#define EXIT_REASON_VMPTRLD 21
|
||||
#define EXIT_REASON_VMPTRST 22
|
||||
#define EXIT_REASON_VMREAD 23
|
||||
#define EXIT_REASON_VMRESUME 24
|
||||
#define EXIT_REASON_VMWRITE 25
|
||||
#define EXIT_REASON_VMOFF 26
|
||||
#define EXIT_REASON_VMON 27
|
||||
#define EXIT_REASON_CR_ACCESS 28
|
||||
#define EXIT_REASON_DR_ACCESS 29
|
||||
#define EXIT_REASON_IO_INSTRUCTION 30
|
||||
#define EXIT_REASON_MSR_READ 31
|
||||
#define EXIT_REASON_MSR_WRITE 32
|
||||
#define EXIT_REASON_INVALID_STATE 33
|
||||
#define EXIT_REASON_MWAIT_INSTRUCTION 36
|
||||
#define EXIT_REASON_MONITOR_INSTRUCTION 39
|
||||
#define EXIT_REASON_PAUSE_INSTRUCTION 40
|
||||
#define EXIT_REASON_MCE_DURING_VMENTRY 41
|
||||
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
|
||||
#define EXIT_REASON_APIC_ACCESS 44
|
||||
#define EXIT_REASON_EPT_VIOLATION 48
|
||||
#define EXIT_REASON_EPT_MISCONFIG 49
|
||||
#define EXIT_REASON_WBINVD 54
|
||||
#define EXIT_REASON_XSETBV 55
|
||||
#define EXIT_REASON_INVPCID 58
|
||||
|
||||
#define VMX_EXIT_REASONS \
|
||||
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
|
||||
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
|
||||
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
|
||||
{ EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
|
||||
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
|
||||
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
|
||||
{ EXIT_REASON_CPUID, "CPUID" }, \
|
||||
{ EXIT_REASON_HLT, "HLT" }, \
|
||||
{ EXIT_REASON_INVLPG, "INVLPG" }, \
|
||||
{ EXIT_REASON_RDPMC, "RDPMC" }, \
|
||||
{ EXIT_REASON_RDTSC, "RDTSC" }, \
|
||||
{ EXIT_REASON_VMCALL, "VMCALL" }, \
|
||||
{ EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
|
||||
{ EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
|
||||
{ EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
|
||||
{ EXIT_REASON_VMPTRST, "VMPTRST" }, \
|
||||
{ EXIT_REASON_VMREAD, "VMREAD" }, \
|
||||
{ EXIT_REASON_VMRESUME, "VMRESUME" }, \
|
||||
{ EXIT_REASON_VMWRITE, "VMWRITE" }, \
|
||||
{ EXIT_REASON_VMOFF, "VMOFF" }, \
|
||||
{ EXIT_REASON_VMON, "VMON" }, \
|
||||
{ EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
|
||||
{ EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
|
||||
{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
|
||||
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
|
||||
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
|
||||
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
|
||||
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
|
||||
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
|
||||
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
|
||||
{ EXIT_REASON_WBINVD, "WBINVD" }
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
|
@ -241,49 +323,6 @@ enum vmcs_field {
|
|||
HOST_RIP = 0x00006c16,
|
||||
};
|
||||
|
||||
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
|
||||
|
||||
#define EXIT_REASON_EXCEPTION_NMI 0
|
||||
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
|
||||
#define EXIT_REASON_TRIPLE_FAULT 2
|
||||
|
||||
#define EXIT_REASON_PENDING_INTERRUPT 7
|
||||
#define EXIT_REASON_NMI_WINDOW 8
|
||||
#define EXIT_REASON_TASK_SWITCH 9
|
||||
#define EXIT_REASON_CPUID 10
|
||||
#define EXIT_REASON_HLT 12
|
||||
#define EXIT_REASON_INVD 13
|
||||
#define EXIT_REASON_INVLPG 14
|
||||
#define EXIT_REASON_RDPMC 15
|
||||
#define EXIT_REASON_RDTSC 16
|
||||
#define EXIT_REASON_VMCALL 18
|
||||
#define EXIT_REASON_VMCLEAR 19
|
||||
#define EXIT_REASON_VMLAUNCH 20
|
||||
#define EXIT_REASON_VMPTRLD 21
|
||||
#define EXIT_REASON_VMPTRST 22
|
||||
#define EXIT_REASON_VMREAD 23
|
||||
#define EXIT_REASON_VMRESUME 24
|
||||
#define EXIT_REASON_VMWRITE 25
|
||||
#define EXIT_REASON_VMOFF 26
|
||||
#define EXIT_REASON_VMON 27
|
||||
#define EXIT_REASON_CR_ACCESS 28
|
||||
#define EXIT_REASON_DR_ACCESS 29
|
||||
#define EXIT_REASON_IO_INSTRUCTION 30
|
||||
#define EXIT_REASON_MSR_READ 31
|
||||
#define EXIT_REASON_MSR_WRITE 32
|
||||
#define EXIT_REASON_INVALID_STATE 33
|
||||
#define EXIT_REASON_MWAIT_INSTRUCTION 36
|
||||
#define EXIT_REASON_MONITOR_INSTRUCTION 39
|
||||
#define EXIT_REASON_PAUSE_INSTRUCTION 40
|
||||
#define EXIT_REASON_MCE_DURING_VMENTRY 41
|
||||
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
|
||||
#define EXIT_REASON_APIC_ACCESS 44
|
||||
#define EXIT_REASON_EPT_VIOLATION 48
|
||||
#define EXIT_REASON_EPT_MISCONFIG 49
|
||||
#define EXIT_REASON_WBINVD 54
|
||||
#define EXIT_REASON_XSETBV 55
|
||||
#define EXIT_REASON_INVPCID 58
|
||||
|
||||
/*
|
||||
* Interruption-information format
|
||||
*/
|
||||
|
@ -488,3 +527,5 @@ enum vm_instruction_error_number {
|
|||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -183,95 +183,6 @@ TRACE_EVENT(kvm_apic,
|
|||
#define KVM_ISA_VMX 1
|
||||
#define KVM_ISA_SVM 2
|
||||
|
||||
#define VMX_EXIT_REASONS \
|
||||
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
|
||||
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
|
||||
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
|
||||
{ EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
|
||||
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
|
||||
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
|
||||
{ EXIT_REASON_CPUID, "CPUID" }, \
|
||||
{ EXIT_REASON_HLT, "HLT" }, \
|
||||
{ EXIT_REASON_INVLPG, "INVLPG" }, \
|
||||
{ EXIT_REASON_RDPMC, "RDPMC" }, \
|
||||
{ EXIT_REASON_RDTSC, "RDTSC" }, \
|
||||
{ EXIT_REASON_VMCALL, "VMCALL" }, \
|
||||
{ EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
|
||||
{ EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
|
||||
{ EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
|
||||
{ EXIT_REASON_VMPTRST, "VMPTRST" }, \
|
||||
{ EXIT_REASON_VMREAD, "VMREAD" }, \
|
||||
{ EXIT_REASON_VMRESUME, "VMRESUME" }, \
|
||||
{ EXIT_REASON_VMWRITE, "VMWRITE" }, \
|
||||
{ EXIT_REASON_VMOFF, "VMOFF" }, \
|
||||
{ EXIT_REASON_VMON, "VMON" }, \
|
||||
{ EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
|
||||
{ EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
|
||||
{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
|
||||
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
|
||||
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
|
||||
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
|
||||
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
|
||||
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
|
||||
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
|
||||
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
|
||||
{ EXIT_REASON_WBINVD, "WBINVD" }
|
||||
|
||||
#define SVM_EXIT_REASONS \
|
||||
{ SVM_EXIT_READ_CR0, "read_cr0" }, \
|
||||
{ SVM_EXIT_READ_CR3, "read_cr3" }, \
|
||||
{ SVM_EXIT_READ_CR4, "read_cr4" }, \
|
||||
{ SVM_EXIT_READ_CR8, "read_cr8" }, \
|
||||
{ SVM_EXIT_WRITE_CR0, "write_cr0" }, \
|
||||
{ SVM_EXIT_WRITE_CR3, "write_cr3" }, \
|
||||
{ SVM_EXIT_WRITE_CR4, "write_cr4" }, \
|
||||
{ SVM_EXIT_WRITE_CR8, "write_cr8" }, \
|
||||
{ SVM_EXIT_READ_DR0, "read_dr0" }, \
|
||||
{ SVM_EXIT_READ_DR1, "read_dr1" }, \
|
||||
{ SVM_EXIT_READ_DR2, "read_dr2" }, \
|
||||
{ SVM_EXIT_READ_DR3, "read_dr3" }, \
|
||||
{ SVM_EXIT_WRITE_DR0, "write_dr0" }, \
|
||||
{ SVM_EXIT_WRITE_DR1, "write_dr1" }, \
|
||||
{ SVM_EXIT_WRITE_DR2, "write_dr2" }, \
|
||||
{ SVM_EXIT_WRITE_DR3, "write_dr3" }, \
|
||||
{ SVM_EXIT_WRITE_DR5, "write_dr5" }, \
|
||||
{ SVM_EXIT_WRITE_DR7, "write_dr7" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
|
||||
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
|
||||
{ SVM_EXIT_INTR, "interrupt" }, \
|
||||
{ SVM_EXIT_NMI, "nmi" }, \
|
||||
{ SVM_EXIT_SMI, "smi" }, \
|
||||
{ SVM_EXIT_INIT, "init" }, \
|
||||
{ SVM_EXIT_VINTR, "vintr" }, \
|
||||
{ SVM_EXIT_CPUID, "cpuid" }, \
|
||||
{ SVM_EXIT_INVD, "invd" }, \
|
||||
{ SVM_EXIT_HLT, "hlt" }, \
|
||||
{ SVM_EXIT_INVLPG, "invlpg" }, \
|
||||
{ SVM_EXIT_INVLPGA, "invlpga" }, \
|
||||
{ SVM_EXIT_IOIO, "io" }, \
|
||||
{ SVM_EXIT_MSR, "msr" }, \
|
||||
{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
|
||||
{ SVM_EXIT_SHUTDOWN, "shutdown" }, \
|
||||
{ SVM_EXIT_VMRUN, "vmrun" }, \
|
||||
{ SVM_EXIT_VMMCALL, "hypercall" }, \
|
||||
{ SVM_EXIT_VMLOAD, "vmload" }, \
|
||||
{ SVM_EXIT_VMSAVE, "vmsave" }, \
|
||||
{ SVM_EXIT_STGI, "stgi" }, \
|
||||
{ SVM_EXIT_CLGI, "clgi" }, \
|
||||
{ SVM_EXIT_SKINIT, "skinit" }, \
|
||||
{ SVM_EXIT_WBINVD, "wbinvd" }, \
|
||||
{ SVM_EXIT_MONITOR, "monitor" }, \
|
||||
{ SVM_EXIT_MWAIT, "mwait" }, \
|
||||
{ SVM_EXIT_XSETBV, "xsetbv" }, \
|
||||
{ SVM_EXIT_NPF, "npf" }
|
||||
|
||||
/*
|
||||
* Tracepoint for kvm guest exit:
|
||||
*/
|
||||
|
|
File diff suppressed because it is too large
|
@ -540,6 +540,9 @@ int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long siz
|
|||
|
||||
enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
|
||||
unsigned long size, const char *sys);
|
||||
enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf,
|
||||
unsigned long size, const char *sys);
|
||||
void pevent_free_format(struct event_format *event);
|
||||
|
||||
void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
|
||||
const char *name, struct pevent_record *record,
|
||||
|
|
|
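
The new pevent_parse_format() entry point above is what the evsel tracepoint
constructor mentioned in the pull description builds on: it is fed the text
of a single tracepoint's format file. A minimal sketch of a caller -- the
header name and the "non-zero means error" assumption are mine, not taken
from this diff:

    #include "event-parse.h"        /* libtraceevent, assumed header name */

    static struct event_format *parse_one_format(const char *buf,
                                                 unsigned long size,
                                                 const char *sys)
    {
            struct event_format *event = NULL;

            /* assumption: a non-zero pevent_errno value signals failure */
            if (pevent_parse_format(&event, buf, size, sys))
                    return NULL;

            return event;           /* release with pevent_free_format() */
    }
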
@ -12,7 +12,7 @@ SYNOPSIS
|
|||
[--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
|
||||
{top|record|report|diff|buildid-list}
|
||||
'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
|
||||
| --guestvmlinux=<path>] {top|record|report|diff|buildid-list}
|
||||
| --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat}
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
|
@ -38,6 +38,18 @@ There are a couple of variants of perf kvm:
|
|||
so that other tools can be used to fetch packages with matching symbol tables
|
||||
for use by perf report.
|
||||
|
||||
'perf kvm stat <command>' to run a command and gather performance counter
|
||||
statistics.
|
||||
Especially, perf 'kvm stat record/report' generates a statistical analysis
|
||||
of KVM events. Currently, vmexit, mmio and ioport events are supported.
|
||||
'perf kvm stat record <command>' records kvm events and the events between
|
||||
start and end <command>.
|
||||
And this command produces a file which contains tracing results of kvm
|
||||
events.
|
||||
|
||||
'perf kvm stat report' reports statistical data which includes events
|
||||
handled time, samples, and so on.
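
A typical session, using only the commands and options documented here
(durations and values are illustrative):

    perf kvm stat record sleep 10              (trace kvm events while 'sleep 10' runs)
    perf kvm stat report                       (VM-EXIT summary for all vcpus, the defaults)
    perf kvm stat report --vcpu=0 --key=time   (vcpu 0 only, sorted by average handling time)
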
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
-i::
|
||||
|
@ -68,7 +80,21 @@ OPTIONS
|
|||
--guestvmlinux=<path>::
|
||||
Guest os kernel vmlinux.
|
||||
|
||||
STAT REPORT OPTIONS
|
||||
-------------------
|
||||
--vcpu=<value>::
|
||||
analyze events which occures on this vcpu. (default: all vcpus)
|
||||
|
||||
--events=<value>::
|
||||
events to be analyzed. Possible values: vmexit, mmio, ioport.
|
||||
(default: vmexit)
|
||||
-k::
|
||||
--key=<value>::
|
||||
Sorting key. Possible values: sample (default, sort by samples
|
||||
number), time (sort by average time).
|
||||
|
||||
SEE ALSO
|
||||
--------
|
||||
linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
|
||||
linkperf:perf-diff[1], linkperf:perf-buildid-list[1]
|
||||
linkperf:perf-diff[1], linkperf:perf-buildid-list[1],
|
||||
linkperf:perf-stat[1]
|
||||
|
|
|
@ -16,3 +16,6 @@ arch/*/lib/memset*.S
|
|||
include/linux/poison.h
|
||||
include/linux/magic.h
|
||||
include/linux/hw_breakpoint.h
|
||||
arch/x86/include/asm/svm.h
|
||||
arch/x86/include/asm/vmx.h
|
||||
arch/x86/include/asm/kvm_host.h
|
||||
|
|
|
@ -233,13 +233,13 @@ export PERL_PATH
|
|||
FLEX = flex
|
||||
BISON= bison
|
||||
|
||||
$(OUTPUT)util/parse-events-flex.c: util/parse-events.l
|
||||
$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
|
||||
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
|
||||
|
||||
$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
|
||||
$(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c
|
||||
|
||||
$(OUTPUT)util/pmu-flex.c: util/pmu.l
|
||||
$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
|
||||
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
|
||||
|
||||
$(OUTPUT)util/pmu-bison.c: util/pmu.y
|
||||
|
@ -715,7 +715,7 @@ else
|
|||
EXTLIBS += -liberty
|
||||
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
|
||||
else
|
||||
FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd
|
||||
FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd
|
||||
has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD))
|
||||
ifeq ($(has_bfd),y)
|
||||
EXTLIBS += -lbfd
|
||||
|
|
|
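
Why -DPACKAGE='perf' makes the probe work again: recent binutils install a
bfd.h that refuses to compile unless PACKAGE (or PACKAGE_VERSION) is
defined, roughly:

    /* Paraphrase of the guard in recent bfd.h, not the verbatim binutils
     * source; without PACKAGE defined, the try-cc feature test fails and
     * libbfd support is silently disabled. */
    #if !defined(PACKAGE) && !defined(PACKAGE_VERSION)
    #error config.h must be included before this header
    #endif
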
@ -1,6 +1,7 @@
|
|||
#include "builtin.h"
|
||||
#include "perf.h"
|
||||
|
||||
#include "util/evlist.h"
|
||||
#include "util/evsel.h"
|
||||
#include "util/util.h"
|
||||
#include "util/cache.h"
|
||||
|
@ -212,36 +213,38 @@ static int insert_caller_stat(unsigned long call_site,
|
|||
}
|
||||
|
||||
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample, int node)
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct event_format *event = evsel->tp_format;
|
||||
void *data = sample->raw_data;
|
||||
unsigned long call_site;
|
||||
unsigned long ptr;
|
||||
int bytes_req, cpu = sample->cpu;
|
||||
int bytes_alloc;
|
||||
int node1, node2;
|
||||
unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
|
||||
call_site = perf_evsel__intval(evsel, sample, "call_site");
|
||||
int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
|
||||
bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
|
||||
|
||||
ptr = raw_field_value(event, "ptr", data);
|
||||
call_site = raw_field_value(event, "call_site", data);
|
||||
bytes_req = raw_field_value(event, "bytes_req", data);
|
||||
bytes_alloc = raw_field_value(event, "bytes_alloc", data);
|
||||
|
||||
if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu) ||
|
||||
if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
|
||||
insert_caller_stat(call_site, bytes_req, bytes_alloc))
|
||||
return -1;
|
||||
|
||||
total_requested += bytes_req;
|
||||
total_allocated += bytes_alloc;
|
||||
|
||||
if (node) {
|
||||
node1 = cpunode_map[cpu];
|
||||
node2 = raw_field_value(event, "node", data);
|
||||
nr_allocs++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
int ret = perf_evsel__process_alloc_event(evsel, sample);
|
||||
|
||||
if (!ret) {
|
||||
int node1 = cpunode_map[sample->cpu],
|
||||
node2 = perf_evsel__intval(evsel, sample, "node");
|
||||
|
||||
if (node1 != node2)
|
||||
nr_cross_allocs++;
|
||||
}
|
||||
nr_allocs++;
|
||||
return 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
|
||||
|
@ -275,8 +278,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
|
|||
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
unsigned long ptr = raw_field_value(evsel->tp_format, "ptr",
|
||||
sample->raw_data);
|
||||
unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
|
||||
struct alloc_stat *s_alloc, *s_caller;
|
||||
|
||||
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
|
||||
|
@ -297,28 +299,8 @@ static int perf_evsel__process_free_event(struct perf_evsel *evsel,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_kmem_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct event_format *event = evsel->tp_format;
|
||||
|
||||
if (!strcmp(event->name, "kmalloc") ||
|
||||
!strcmp(event->name, "kmem_cache_alloc")) {
|
||||
return perf_evsel__process_alloc_event(evsel, sample, 0);
|
||||
}
|
||||
|
||||
if (!strcmp(event->name, "kmalloc_node") ||
|
||||
!strcmp(event->name, "kmem_cache_alloc_node")) {
|
||||
return perf_evsel__process_alloc_event(evsel, sample, 1);
|
||||
}
|
||||
|
||||
if (!strcmp(event->name, "kfree") ||
|
||||
!strcmp(event->name, "kmem_cache_free")) {
|
||||
return perf_evsel__process_free_event(evsel, sample);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
|
@ -336,7 +318,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
|||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
return perf_evsel__process_kmem_event(evsel, sample);
|
||||
if (evsel->handler.func != NULL) {
|
||||
tracepoint_handler f = evsel->handler.func;
|
||||
return f(evsel, sample);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct perf_tool perf_kmem = {
|
||||
|
@ -498,6 +485,14 @@ static int __cmd_kmem(void)
|
|||
{
|
||||
int err = -EINVAL;
|
||||
struct perf_session *session;
|
||||
const struct perf_evsel_str_handler kmem_tracepoints[] = {
|
||||
{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
|
||||
{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
|
||||
{ "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
|
||||
{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
|
||||
{ "kmem:kfree", perf_evsel__process_free_event, },
|
||||
{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
|
||||
};
|
||||
|
||||
session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem);
|
||||
if (session == NULL)
|
||||
|
@ -509,6 +504,11 @@ static int __cmd_kmem(void)
|
|||
if (!perf_session__has_traces(session, "kmem record"))
|
||||
goto out_delete;
|
||||
|
||||
if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
|
||||
pr_err("Initializing perf session tracepoint handlers failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
setup_pager();
|
||||
err = perf_session__process_events(session, &perf_kmem);
|
||||
if (err != 0)
|
||||
|
|
|
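
The builtin-kmem.c changes above illustrate the first bullet of the pull
description: the per-event strcmp chain in perf_evsel__process_kmem_event()
is replaced by a name-to-handler table resolved once via
perf_session__set_tracepoints_handlers(), and field access moves from
raw_field_value() on raw_data to perf_evsel__intval(). A generic sketch of
that dispatch pattern (types and names here are illustrative, not the
actual perf API):

    struct sample;                              /* opaque here */
    typedef int (*tp_handler)(struct sample *s);

    struct tp_binding {
            const char *name;                   /* "subsys:event" */
            tp_handler  func;
    };

    static int handle_alloc(struct sample *s) { (void)s; return 0; }
    static int handle_free(struct sample *s)  { (void)s; return 0; }

    static const struct tp_binding bindings[] = {
            { "kmem:kmalloc", handle_alloc },
            { "kmem:kfree",   handle_free  },
    };

    /* Each binding is resolved to its evsel once at setup; every sample
     * then calls its handler directly instead of walking an if/strcmp
     * chain per event. */
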
@ -1,6 +1,7 @@
|
|||
#include "builtin.h"
|
||||
#include "perf.h"
|
||||
|
||||
#include "util/evsel.h"
|
||||
#include "util/util.h"
|
||||
#include "util/cache.h"
|
||||
#include "util/symbol.h"
|
||||
|
@ -10,8 +11,10 @@
|
|||
|
||||
#include "util/parse-options.h"
|
||||
#include "util/trace-event.h"
|
||||
|
||||
#include "util/debug.h"
|
||||
#include "util/debugfs.h"
|
||||
#include "util/tool.h"
|
||||
#include "util/stat.h"
|
||||
|
||||
#include <sys/prctl.h>
|
||||
|
||||
|
@ -19,11 +22,836 @@
|
|||
#include <pthread.h>
|
||||
#include <math.h>
|
||||
|
||||
static const char *file_name;
|
||||
#include "../../arch/x86/include/asm/svm.h"
|
||||
#include "../../arch/x86/include/asm/vmx.h"
|
||||
#include "../../arch/x86/include/asm/kvm.h"
|
||||
|
||||
struct event_key {
|
||||
#define INVALID_KEY (~0ULL)
|
||||
u64 key;
|
||||
int info;
|
||||
};
|
||||
|
||||
struct kvm_events_ops {
|
||||
bool (*is_begin_event)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key);
|
||||
bool (*is_end_event)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample, struct event_key *key);
|
||||
void (*decode_key)(struct event_key *key, char decode[20]);
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static void exit_event_get_key(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
key->info = 0;
|
||||
key->key = perf_evsel__intval(evsel, sample, "exit_reason");
|
||||
}
|
||||
|
||||
static bool kvm_exit_event(struct perf_evsel *evsel)
|
||||
{
|
||||
return !strcmp(evsel->name, "kvm:kvm_exit");
|
||||
}
|
||||
|
||||
static bool exit_event_begin(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample, struct event_key *key)
|
||||
{
|
||||
if (kvm_exit_event(evsel)) {
|
||||
exit_event_get_key(evsel, sample, key);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool kvm_entry_event(struct perf_evsel *evsel)
|
||||
{
|
||||
return !strcmp(evsel->name, "kvm:kvm_entry");
|
||||
}
|
||||
|
||||
static bool exit_event_end(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct event_key *key __maybe_unused)
|
||||
{
|
||||
return kvm_entry_event(evsel);
|
||||
}
|
||||
|
||||
struct exit_reasons_table {
|
||||
unsigned long exit_code;
|
||||
const char *reason;
|
||||
};
|
||||
|
||||
struct exit_reasons_table vmx_exit_reasons[] = {
|
||||
VMX_EXIT_REASONS
|
||||
};
|
||||
|
||||
struct exit_reasons_table svm_exit_reasons[] = {
|
||||
SVM_EXIT_REASONS
|
||||
};
|
||||
|
||||
static int cpu_isa;
|
||||
|
||||
static const char *get_exit_reason(u64 exit_code)
|
||||
{
|
||||
int table_size = ARRAY_SIZE(svm_exit_reasons);
|
||||
struct exit_reasons_table *table = svm_exit_reasons;
|
||||
|
||||
if (cpu_isa == 1) {
|
||||
table = vmx_exit_reasons;
|
||||
table_size = ARRAY_SIZE(vmx_exit_reasons);
|
||||
}
|
||||
|
||||
while (table_size--) {
|
||||
if (table->exit_code == exit_code)
|
||||
return table->reason;
|
||||
table++;
|
||||
}
|
||||
|
||||
pr_err("unknown kvm exit code:%lld on %s\n",
|
||||
(unsigned long long)exit_code, cpu_isa ? "VMX" : "SVM");
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
static void exit_event_decode_key(struct event_key *key, char decode[20])
|
||||
{
|
||||
const char *exit_reason = get_exit_reason(key->key);
|
||||
|
||||
scnprintf(decode, 20, "%s", exit_reason);
|
||||
}
|
||||
|
||||
static struct kvm_events_ops exit_events = {
|
||||
.is_begin_event = exit_event_begin,
|
||||
.is_end_event = exit_event_end,
|
||||
.decode_key = exit_event_decode_key,
|
||||
.name = "VM-EXIT"
|
||||
};
|
||||
|
||||
/*
|
||||
* For the mmio events, we treat:
|
||||
* the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
|
||||
* the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
|
||||
*/
|
||||
static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
key->key = perf_evsel__intval(evsel, sample, "gpa");
|
||||
key->info = perf_evsel__intval(evsel, sample, "type");
|
||||
}
|
||||
|
||||
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
|
||||
#define KVM_TRACE_MMIO_READ 1
|
||||
#define KVM_TRACE_MMIO_WRITE 2
|
||||
|
||||
static bool mmio_event_begin(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample, struct event_key *key)
|
||||
{
|
||||
/* MMIO read begin event in kernel. */
|
||||
if (kvm_exit_event(evsel))
|
||||
return true;
|
||||
|
||||
/* MMIO write begin event in kernel. */
|
||||
if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
|
||||
perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
|
||||
mmio_event_get_key(evsel, sample, key);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
/* MMIO write end event in kernel. */
|
||||
if (kvm_entry_event(evsel))
|
||||
return true;
|
||||
|
||||
/* MMIO read end event in kernel.*/
|
||||
if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
|
||||
perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
|
||||
mmio_event_get_key(evsel, sample, key);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void mmio_event_decode_key(struct event_key *key, char decode[20])
|
||||
{
|
||||
scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
|
||||
key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
|
||||
}
|
||||
|
||||
static struct kvm_events_ops mmio_events = {
|
||||
.is_begin_event = mmio_event_begin,
|
||||
.is_end_event = mmio_event_end,
|
||||
.decode_key = mmio_event_decode_key,
|
||||
.name = "MMIO Access"
|
||||
};
|
||||
|
||||
/* The time of emulation pio access is from kvm_pio to kvm_entry. */
|
||||
static void ioport_event_get_key(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
key->key = perf_evsel__intval(evsel, sample, "port");
|
||||
key->info = perf_evsel__intval(evsel, sample, "rw");
|
||||
}
|
||||
|
||||
static bool ioport_event_begin(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
if (!strcmp(evsel->name, "kvm:kvm_pio")) {
|
||||
ioport_event_get_key(evsel, sample, key);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool ioport_event_end(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct event_key *key __maybe_unused)
|
||||
{
|
||||
return kvm_entry_event(evsel);
|
||||
}
|
||||
|
||||
static void ioport_event_decode_key(struct event_key *key, char decode[20])
|
||||
{
|
||||
scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
|
||||
key->info ? "POUT" : "PIN");
|
||||
}
|
||||
|
||||
static struct kvm_events_ops ioport_events = {
|
||||
.is_begin_event = ioport_event_begin,
|
||||
.is_end_event = ioport_event_end,
|
||||
.decode_key = ioport_event_decode_key,
|
||||
.name = "IO Port Access"
|
||||
};
|
||||
|
||||
static const char *report_event = "vmexit";
|
||||
struct kvm_events_ops *events_ops;
|
||||
|
||||
static bool register_kvm_events_ops(void)
|
||||
{
|
||||
bool ret = true;
|
||||
|
||||
if (!strcmp(report_event, "vmexit"))
|
||||
events_ops = &exit_events;
|
||||
else if (!strcmp(report_event, "mmio"))
|
||||
events_ops = &mmio_events;
|
||||
else if (!strcmp(report_event, "ioport"))
|
||||
events_ops = &ioport_events;
|
||||
else {
|
||||
pr_err("Unknown report event:%s\n", report_event);
|
||||
ret = false;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct kvm_event_stats {
|
||||
u64 time;
|
||||
struct stats stats;
|
||||
};
|
||||
|
||||
struct kvm_event {
|
||||
struct list_head hash_entry;
|
||||
struct rb_node rb;
|
||||
|
||||
struct event_key key;
|
||||
|
||||
struct kvm_event_stats total;
|
||||
|
||||
#define DEFAULT_VCPU_NUM 8
|
||||
int max_vcpu;
|
||||
struct kvm_event_stats *vcpu;
|
||||
};
|
||||
|
||||
struct vcpu_event_record {
|
||||
int vcpu_id;
|
||||
u64 start_time;
|
||||
struct kvm_event *last_event;
|
||||
};
|
||||
|
||||
#define EVENTS_BITS 12
|
||||
#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
|
||||
|
||||
static u64 total_time;
|
||||
static u64 total_count;
|
||||
static struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
|
||||
|
||||
static void init_kvm_event_record(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++)
|
||||
INIT_LIST_HEAD(&kvm_events_cache[i]);
|
||||
}
|
||||
|
||||
static int kvm_events_hash_fn(u64 key)
|
||||
{
|
||||
return key & (EVENTS_CACHE_SIZE - 1);
|
||||
}
|
||||
|
||||
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
|
||||
{
|
||||
int old_max_vcpu = event->max_vcpu;
|
||||
|
||||
if (vcpu_id < event->max_vcpu)
|
||||
return true;
|
||||
|
||||
while (event->max_vcpu <= vcpu_id)
|
||||
event->max_vcpu += DEFAULT_VCPU_NUM;
|
||||
|
||||
event->vcpu = realloc(event->vcpu,
|
||||
event->max_vcpu * sizeof(*event->vcpu));
|
||||
if (!event->vcpu) {
|
||||
pr_err("Not enough memory\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
memset(event->vcpu + old_max_vcpu, 0,
|
||||
(event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
|
||||
{
|
||||
struct kvm_event *event;
|
||||
|
||||
event = zalloc(sizeof(*event));
|
||||
if (!event) {
|
||||
pr_err("Not enough memory\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
event->key = *key;
|
||||
return event;
|
||||
}
|
||||
|
||||
static struct kvm_event *find_create_kvm_event(struct event_key *key)
|
||||
{
|
||||
struct kvm_event *event;
|
||||
struct list_head *head;
|
||||
|
||||
BUG_ON(key->key == INVALID_KEY);
|
||||
|
||||
head = &kvm_events_cache[kvm_events_hash_fn(key->key)];
|
||||
list_for_each_entry(event, head, hash_entry)
|
||||
if (event->key.key == key->key && event->key.info == key->info)
|
||||
return event;
|
||||
|
||||
event = kvm_alloc_init_event(key);
|
||||
if (!event)
|
||||
return NULL;
|
||||
|
||||
list_add(&event->hash_entry, head);
|
||||
return event;
|
||||
}
|
||||
|
||||
static bool handle_begin_event(struct vcpu_event_record *vcpu_record,
|
||||
struct event_key *key, u64 timestamp)
|
||||
{
|
||||
struct kvm_event *event = NULL;
|
||||
|
||||
if (key->key != INVALID_KEY)
|
||||
event = find_create_kvm_event(key);
|
||||
|
||||
vcpu_record->last_event = event;
|
||||
vcpu_record->start_time = timestamp;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
|
||||
{
|
||||
kvm_stats->time += time_diff;
|
||||
update_stats(&kvm_stats->stats, time_diff);
|
||||
}
|
||||
|
||||
static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
|
||||
{
|
||||
struct kvm_event_stats *kvm_stats = &event->total;
|
||||
|
||||
if (vcpu_id != -1)
|
||||
kvm_stats = &event->vcpu[vcpu_id];
|
||||
|
||||
return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
|
||||
avg_stats(&kvm_stats->stats));
|
||||
}
|
||||
|
||||
static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
|
||||
u64 time_diff)
|
||||
{
|
||||
kvm_update_event_stats(&event->total, time_diff);
|
||||
|
||||
if (!kvm_event_expand(event, vcpu_id))
|
||||
return false;
|
||||
|
||||
kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool handle_end_event(struct vcpu_event_record *vcpu_record,
|
||||
struct event_key *key, u64 timestamp)
|
||||
{
|
||||
struct kvm_event *event;
|
||||
u64 time_begin, time_diff;
|
||||
|
||||
event = vcpu_record->last_event;
|
||||
time_begin = vcpu_record->start_time;
|
||||
|
||||
/* The begin event is not caught. */
|
||||
if (!time_begin)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* In some case, the 'begin event' only records the start timestamp,
|
||||
* the actual event is recognized in the 'end event' (e.g. mmio-event).
|
||||
*/
|
||||
|
||||
/* Both begin and end events did not get the key. */
|
||||
if (!event && key->key == INVALID_KEY)
|
||||
return true;
|
||||
|
||||
if (!event)
|
||||
event = find_create_kvm_event(key);
|
||||
|
||||
if (!event)
|
||||
return false;
|
||||
|
||||
vcpu_record->last_event = NULL;
|
||||
vcpu_record->start_time = 0;
|
||||
|
||||
BUG_ON(timestamp < time_begin);
|
||||
|
||||
time_diff = timestamp - time_begin;
|
||||
return update_kvm_event(event, vcpu_record->vcpu_id, time_diff);
|
||||
}
|
||||
|
||||
static
|
||||
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
|
||||
struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
/* Only kvm_entry records vcpu id. */
|
||||
if (!thread->priv && kvm_entry_event(evsel)) {
|
||||
struct vcpu_event_record *vcpu_record;
|
||||
|
||||
vcpu_record = zalloc(sizeof(*vcpu_record));
|
||||
if (!vcpu_record) {
|
||||
pr_err("%s: Not enough memory\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
|
||||
thread->priv = vcpu_record;
|
||||
}
|
||||
|
||||
return thread->priv;
|
||||
}
|
||||
|
||||
static bool handle_kvm_event(struct thread *thread, struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct vcpu_event_record *vcpu_record;
|
||||
struct event_key key = {.key = INVALID_KEY};
|
||||
|
||||
vcpu_record = per_vcpu_record(thread, evsel, sample);
|
||||
if (!vcpu_record)
|
||||
return true;
|
||||
|
||||
if (events_ops->is_begin_event(evsel, sample, &key))
|
||||
return handle_begin_event(vcpu_record, &key, sample->time);
|
||||
|
||||
if (events_ops->is_end_event(evsel, sample, &key))
|
||||
return handle_end_event(vcpu_record, &key, sample->time);
|
||||
|
||||
return true;
|
||||
}
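
A reader's summary of the accounting flow implemented above (not part of
the patch itself):

    /*
     * per_vcpu_record() attaches a vcpu_event_record to the thread the
     * first time a kvm_entry sample (which carries vcpu_id) is seen.
     * A "begin" event stashes the key and timestamp in that record; the
     * matching "end" event computes the delta and folds it into the
     * per-key and per-vcpu struct stats via update_kvm_event(), so a
     * VM-EXIT is costed from kvm_exit to the next kvm_entry on that
     * thread.
     */
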
|
||||
|
||||
typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
|
||||
struct kvm_event_key {
|
||||
const char *name;
|
||||
key_cmp_fun key;
|
||||
};
|
||||
|
||||
static int trace_vcpu = -1;
|
||||
#define GET_EVENT_KEY(func, field) \
|
||||
static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
|
||||
{ \
|
||||
if (vcpu == -1) \
|
||||
return event->total.field; \
|
||||
\
|
||||
if (vcpu >= event->max_vcpu) \
|
||||
return 0; \
|
||||
\
|
||||
return event->vcpu[vcpu].field; \
|
||||
}
|
||||
|
||||
#define COMPARE_EVENT_KEY(func, field) \
|
||||
GET_EVENT_KEY(func, field) \
|
||||
static int compare_kvm_event_ ## func(struct kvm_event *one, \
|
||||
struct kvm_event *two, int vcpu)\
|
||||
{ \
|
||||
return get_event_ ##func(one, vcpu) > \
|
||||
get_event_ ##func(two, vcpu); \
|
||||
}
|
||||
|
||||
GET_EVENT_KEY(time, time);
|
||||
COMPARE_EVENT_KEY(count, stats.n);
|
||||
COMPARE_EVENT_KEY(mean, stats.mean);
|
||||
|
||||
#define DEF_SORT_NAME_KEY(name, compare_key) \
|
||||
{ #name, compare_kvm_event_ ## compare_key }
|
||||
|
||||
static struct kvm_event_key keys[] = {
|
||||
DEF_SORT_NAME_KEY(sample, count),
|
||||
DEF_SORT_NAME_KEY(time, mean),
|
||||
{ NULL, NULL }
|
||||
};
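
For readability, this is what COMPARE_EVENT_KEY(count, stats.n) above
expands to (written out by hand here, not generated code);
DEF_SORT_NAME_KEY(sample, count) then pairs the user-visible key name
"sample" with the generated comparator:

    static u64 get_event_count(struct kvm_event *event, int vcpu)
    {
            if (vcpu == -1)
                    return event->total.stats.n;

            if (vcpu >= event->max_vcpu)
                    return 0;

            return event->vcpu[vcpu].stats.n;
    }

    static int compare_kvm_event_count(struct kvm_event *one,
                                       struct kvm_event *two, int vcpu)
    {
            return get_event_count(one, vcpu) > get_event_count(two, vcpu);
    }
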
|
||||
|
||||
static const char *sort_key = "sample";
|
||||
static key_cmp_fun compare;
|
||||
|
||||
static bool select_key(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; keys[i].name; i++) {
|
||||
if (!strcmp(keys[i].name, sort_key)) {
|
||||
compare = keys[i].key;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
pr_err("Unknown compare key:%s\n", sort_key);
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct rb_root result;
|
||||
static void insert_to_result(struct kvm_event *event, key_cmp_fun bigger,
|
||||
int vcpu)
|
||||
{
|
||||
struct rb_node **rb = &result.rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct kvm_event *p;
|
||||
|
||||
while (*rb) {
|
||||
p = container_of(*rb, struct kvm_event, rb);
|
||||
parent = *rb;
|
||||
|
||||
if (bigger(event, p, vcpu))
|
||||
rb = &(*rb)->rb_left;
|
||||
else
|
||||
rb = &(*rb)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&event->rb, parent, rb);
|
||||
rb_insert_color(&event->rb, &result);
|
||||
}
|
||||
|
||||
static void update_total_count(struct kvm_event *event, int vcpu)
|
||||
{
|
||||
total_count += get_event_count(event, vcpu);
|
||||
total_time += get_event_time(event, vcpu);
|
||||
}
|
||||
|
||||
static bool event_is_valid(struct kvm_event *event, int vcpu)
|
||||
{
|
||||
return !!get_event_count(event, vcpu);
|
||||
}
|
||||
|
||||
static void sort_result(int vcpu)
|
||||
{
|
||||
unsigned int i;
|
||||
struct kvm_event *event;
|
||||
|
||||
for (i = 0; i < EVENTS_CACHE_SIZE; i++)
|
||||
list_for_each_entry(event, &kvm_events_cache[i], hash_entry)
|
||||
if (event_is_valid(event, vcpu)) {
|
||||
update_total_count(event, vcpu);
|
||||
insert_to_result(event, compare, vcpu);
|
||||
}
|
||||
}
|
||||
|
||||
/* returns left most element of result, and erase it */
|
||||
static struct kvm_event *pop_from_result(void)
|
||||
{
|
||||
struct rb_node *node = rb_first(&result);
|
||||
|
||||
if (!node)
|
||||
return NULL;
|
||||
|
||||
rb_erase(node, &result);
|
||||
return container_of(node, struct kvm_event, rb);
|
||||
}
|
||||
|
||||
static void print_vcpu_info(int vcpu)
|
||||
{
|
||||
pr_info("Analyze events for ");
|
||||
|
||||
if (vcpu == -1)
|
||||
pr_info("all VCPUs:\n\n");
|
||||
else
|
||||
pr_info("VCPU %d:\n\n", vcpu);
|
||||
}
|
||||
|
||||
static void print_result(int vcpu)
|
||||
{
|
||||
char decode[20];
|
||||
struct kvm_event *event;
|
||||
|
||||
pr_info("\n\n");
|
||||
print_vcpu_info(vcpu);
|
||||
pr_info("%20s ", events_ops->name);
|
||||
pr_info("%10s ", "Samples");
|
||||
pr_info("%9s ", "Samples%");
|
||||
|
||||
pr_info("%9s ", "Time%");
|
||||
pr_info("%16s ", "Avg time");
|
||||
pr_info("\n\n");
|
||||
|
||||
while ((event = pop_from_result())) {
|
||||
u64 ecount, etime;
|
||||
|
||||
ecount = get_event_count(event, vcpu);
|
||||
etime = get_event_time(event, vcpu);
|
||||
|
||||
events_ops->decode_key(&event->key, decode);
|
||||
pr_info("%20s ", decode);
|
||||
pr_info("%10llu ", (unsigned long long)ecount);
|
||||
pr_info("%8.2f%% ", (double)ecount / total_count * 100);
|
||||
pr_info("%8.2f%% ", (double)etime / total_time * 100);
|
||||
pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
|
||||
kvm_event_rel_stddev(vcpu, event));
|
||||
pr_info("\n");
|
||||
}
|
||||
|
||||
pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n",
|
||||
(unsigned long long)total_count, total_time / 1e3);
|
||||
}
|
||||
|
||||
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct thread *thread = machine__findnew_thread(machine, sample->tid);
|
||||
|
||||
if (thread == NULL) {
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!handle_kvm_event(thread, evsel, sample))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct perf_tool eops = {
|
||||
.sample = process_sample_event,
|
||||
.comm = perf_event__process_comm,
|
||||
.ordered_samples = true,
|
||||
};
|
||||
|
||||
static int get_cpu_isa(struct perf_session *session)
|
||||
{
|
||||
char *cpuid = session->header.env.cpuid;
|
||||
int isa;
|
||||
|
||||
if (strstr(cpuid, "Intel"))
|
||||
isa = 1;
|
||||
else if (strstr(cpuid, "AMD"))
|
||||
isa = 0;
|
||||
else {
|
||||
pr_err("CPU %s is not supported.\n", cpuid);
|
||||
isa = -ENOTSUP;
|
||||
}
|
||||
|
||||
return isa;
|
||||
}
|
||||
|
||||
static const char *file_name;
|
||||
|
||||
static int read_events(void)
|
||||
{
|
||||
struct perf_session *kvm_session;
|
||||
int ret;
|
||||
|
||||
kvm_session = perf_session__new(file_name, O_RDONLY, 0, false, &eops);
|
||||
if (!kvm_session) {
|
||||
pr_err("Initializing perf session failed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!perf_session__has_traces(kvm_session, "kvm record"))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Do not use 'isa' recorded in kvm_exit tracepoint since it is not
|
||||
* traced in the old kernel.
|
||||
*/
|
||||
ret = get_cpu_isa(kvm_session);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
cpu_isa = ret;
|
||||
|
||||
return perf_session__process_events(kvm_session, &eops);
|
||||
}
|
||||
|
||||
static bool verify_vcpu(int vcpu)
|
||||
{
|
||||
if (vcpu != -1 && vcpu < 0) {
|
||||
pr_err("Invalid vcpu:%d.\n", vcpu);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int kvm_events_report_vcpu(int vcpu)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (!verify_vcpu(vcpu))
|
||||
goto exit;
|
||||
|
||||
if (!select_key())
|
||||
goto exit;
|
||||
|
||||
if (!register_kvm_events_ops())
|
||||
goto exit;
|
||||
|
||||
init_kvm_event_record();
|
||||
setup_pager();
|
||||
|
||||
ret = read_events();
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
sort_result(vcpu);
|
||||
print_result(vcpu);
|
||||
exit:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const char * const record_args[] = {
|
||||
"record",
|
||||
"-R",
|
||||
"-f",
|
||||
"-m", "1024",
|
||||
"-c", "1",
|
||||
"-e", "kvm:kvm_entry",
|
||||
"-e", "kvm:kvm_exit",
|
||||
"-e", "kvm:kvm_mmio",
|
||||
"-e", "kvm:kvm_pio",
|
||||
};
|
||||
|
||||
#define STRDUP_FAIL_EXIT(s) \
|
||||
({ char *_p; \
|
||||
_p = strdup(s); \
|
||||
if (!_p) \
|
||||
return -ENOMEM; \
|
||||
_p; \
|
||||
})
|
||||
|
||||
static int kvm_events_record(int argc, const char **argv)
|
||||
{
|
||||
unsigned int rec_argc, i, j;
|
||||
const char **rec_argv;
|
||||
|
||||
rec_argc = ARRAY_SIZE(record_args) + argc + 2;
|
||||
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
||||
|
||||
if (rec_argv == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
||||
rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
|
||||
|
||||
rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
|
||||
rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
|
||||
|
||||
for (j = 1; j < (unsigned int)argc; j++, i++)
|
||||
rec_argv[i] = argv[j];
|
||||
|
||||
return cmd_record(i, rec_argv, NULL);
|
||||
}
|
||||
|
||||
static const char * const kvm_events_report_usage[] = {
|
||||
"perf kvm stat report [<options>]",
|
||||
NULL
|
||||
};
|
||||
|
||||
static const struct option kvm_events_report_options[] = {
|
||||
OPT_STRING(0, "event", &report_event, "report event",
|
||||
"event for reporting: vmexit, mmio, ioport"),
|
||||
OPT_INTEGER(0, "vcpu", &trace_vcpu,
|
||||
"vcpu id to report"),
|
||||
OPT_STRING('k', "key", &sort_key, "sort-key",
|
||||
"key for sorting: sample(sort by samples number)"
|
||||
" time (sort by avg time)"),
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
static int kvm_events_report(int argc, const char **argv)
|
||||
{
|
||||
symbol__init();
|
||||
|
||||
if (argc) {
|
||||
argc = parse_options(argc, argv,
|
||||
kvm_events_report_options,
|
||||
kvm_events_report_usage, 0);
|
||||
if (argc)
|
||||
usage_with_options(kvm_events_report_usage,
|
||||
kvm_events_report_options);
|
||||
}
|
||||
|
||||
return kvm_events_report_vcpu(trace_vcpu);
|
||||
}
|
||||
|
||||
static void print_kvm_stat_usage(void)
|
||||
{
|
||||
printf("Usage: perf kvm stat <command>\n\n");
|
||||
|
||||
printf("# Available commands:\n");
|
||||
printf("\trecord: record kvm events\n");
|
||||
printf("\treport: report statistical data of kvm events\n");
|
||||
|
||||
printf("\nOtherwise, it is the alias of 'perf stat':\n");
|
||||
}
|
||||
|
||||
static int kvm_cmd_stat(int argc, const char **argv)
|
||||
{
|
||||
if (argc == 1) {
|
||||
print_kvm_stat_usage();
|
||||
goto perf_stat;
|
||||
}
|
||||
|
||||
if (!strncmp(argv[1], "rec", 3))
|
||||
return kvm_events_record(argc - 1, argv + 1);
|
||||
|
||||
if (!strncmp(argv[1], "rep", 3))
|
||||
return kvm_events_report(argc - 1 , argv + 1);
|
||||
|
||||
perf_stat:
|
||||
return cmd_stat(argc, argv, NULL);
|
||||
}
|
||||
|
||||
static char name_buffer[256];
|
||||
|
||||
static const char * const kvm_usage[] = {
|
||||
"perf kvm [<options>] {top|record|report|diff|buildid-list}",
|
||||
"perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@@ -135,6 +963,8 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
|
|||
return cmd_top(argc, argv, NULL);
|
||||
else if (!strncmp(argv[0], "buildid-list", 12))
|
||||
return __cmd_buildid_list(argc, argv);
|
||||
else if (!strncmp(argv[0], "stat", 4))
|
||||
return kvm_cmd_stat(argc, argv);
|
||||
else
|
||||
usage_with_options(kvm_usage, kvm_options);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
|||
#include "builtin.h"
|
||||
#include "perf.h"
|
||||
|
||||
#include "util/evlist.h"
|
||||
#include "util/evsel.h"
|
||||
#include "util/util.h"
|
||||
#include "util/cache.h"
|
||||
|
@@ -41,7 +42,7 @@ struct lock_stat {
|
|||
struct rb_node rb; /* used for sorting */
|
||||
|
||||
/*
|
||||
* FIXME: raw_field_value() returns unsigned long long,
|
||||
* FIXME: perf_evsel__intval() returns u64,
|
||||
* so address of lockdep_map should be dealed as 64bit.
|
||||
* Is there more better solution?
|
||||
*/
|
||||
|
@@ -336,44 +337,18 @@ alloc_failed:
|
|||
|
||||
static const char *input_name;
|
||||
|
||||
struct raw_event_sample {
|
||||
u32 size;
|
||||
char data[0];
|
||||
};
|
||||
|
||||
struct trace_acquire_event {
|
||||
void *addr;
|
||||
const char *name;
|
||||
int flag;
|
||||
};
|
||||
|
||||
struct trace_acquired_event {
|
||||
void *addr;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
struct trace_contended_event {
|
||||
void *addr;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
struct trace_release_event {
|
||||
void *addr;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
struct trace_lock_handler {
|
||||
int (*acquire_event)(struct trace_acquire_event *,
|
||||
const struct perf_sample *sample);
|
||||
int (*acquire_event)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
int (*acquired_event)(struct trace_acquired_event *,
|
||||
const struct perf_sample *sample);
|
||||
int (*acquired_event)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
int (*contended_event)(struct trace_contended_event *,
|
||||
const struct perf_sample *sample);
|
||||
int (*contended_event)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
int (*release_event)(struct trace_release_event *,
|
||||
const struct perf_sample *sample);
|
||||
int (*release_event)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
};
|
||||
|
||||
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
|
||||
|
@@ -412,15 +387,20 @@ enum acquire_flags {
|
|||
READ_LOCK = 2,
|
||||
};
|
||||
|
||||
static int
|
||||
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
|
||||
const struct perf_sample *sample)
|
||||
static int report_lock_acquire_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
struct lock_stat *ls;
|
||||
struct thread_stat *ts;
|
||||
struct lock_seq_stat *seq;
|
||||
const char *name = perf_evsel__strval(evsel, sample, "name");
|
||||
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
|
||||
int flag = perf_evsel__intval(evsel, sample, "flag");
|
||||
|
||||
ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
|
||||
memcpy(&addr, &tmp, sizeof(void *));
|
||||
|
||||
ls = lock_stat_findnew(addr, name);
|
||||
if (!ls)
|
||||
return -1;
|
||||
if (ls->discard)
|
||||
|
@@ -430,19 +410,19 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
|
|||
if (!ts)
|
||||
return -1;
|
||||
|
||||
seq = get_seq(ts, acquire_event->addr);
|
||||
seq = get_seq(ts, addr);
|
||||
if (!seq)
|
||||
return -1;
|
||||
|
||||
switch (seq->state) {
|
||||
case SEQ_STATE_UNINITIALIZED:
|
||||
case SEQ_STATE_RELEASED:
|
||||
if (!acquire_event->flag) {
|
||||
if (!flag) {
|
||||
seq->state = SEQ_STATE_ACQUIRING;
|
||||
} else {
|
||||
if (acquire_event->flag & TRY_LOCK)
|
||||
if (flag & TRY_LOCK)
|
||||
ls->nr_trylock++;
|
||||
if (acquire_event->flag & READ_LOCK)
|
||||
if (flag & READ_LOCK)
|
||||
ls->nr_readlock++;
|
||||
seq->state = SEQ_STATE_READ_ACQUIRED;
|
||||
seq->read_count = 1;
|
||||
|
@@ -450,7 +430,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
|
|||
}
|
||||
break;
|
||||
case SEQ_STATE_READ_ACQUIRED:
|
||||
if (acquire_event->flag & READ_LOCK) {
|
||||
if (flag & READ_LOCK) {
|
||||
seq->read_count++;
|
||||
ls->nr_acquired++;
|
||||
goto end;
|
||||
|
@@ -480,17 +460,20 @@ end:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
|
||||
const struct perf_sample *sample)
|
||||
static int report_lock_acquired_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
u64 timestamp = sample->time;
|
||||
void *addr;
|
||||
struct lock_stat *ls;
|
||||
struct thread_stat *ts;
|
||||
struct lock_seq_stat *seq;
|
||||
u64 contended_term;
|
||||
const char *name = perf_evsel__strval(evsel, sample, "name");
|
||||
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
|
||||
|
||||
ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
|
||||
memcpy(&addr, &tmp, sizeof(void *));
|
||||
|
||||
ls = lock_stat_findnew(addr, name);
|
||||
if (!ls)
|
||||
return -1;
|
||||
if (ls->discard)
|
||||
|
@@ -500,7 +483,7 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
|
|||
if (!ts)
|
||||
return -1;
|
||||
|
||||
seq = get_seq(ts, acquired_event->addr);
|
||||
seq = get_seq(ts, addr);
|
||||
if (!seq)
|
||||
return -1;
|
||||
|
||||
|
@@ -511,7 +494,7 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
|
|||
case SEQ_STATE_ACQUIRING:
|
||||
break;
|
||||
case SEQ_STATE_CONTENDED:
|
||||
contended_term = timestamp - seq->prev_event_time;
|
||||
contended_term = sample->time - seq->prev_event_time;
|
||||
ls->wait_time_total += contended_term;
|
||||
if (contended_term < ls->wait_time_min)
|
||||
ls->wait_time_min = contended_term;
|
||||
|
@@ -536,20 +519,24 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
|
|||
|
||||
seq->state = SEQ_STATE_ACQUIRED;
|
||||
ls->nr_acquired++;
|
||||
seq->prev_event_time = timestamp;
|
||||
seq->prev_event_time = sample->time;
|
||||
end:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
report_lock_contended_event(struct trace_contended_event *contended_event,
|
||||
const struct perf_sample *sample)
|
||||
static int report_lock_contended_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
struct lock_stat *ls;
|
||||
struct thread_stat *ts;
|
||||
struct lock_seq_stat *seq;
|
||||
const char *name = perf_evsel__strval(evsel, sample, "name");
|
||||
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
|
||||
|
||||
ls = lock_stat_findnew(contended_event->addr, contended_event->name);
|
||||
memcpy(&addr, &tmp, sizeof(void *));
|
||||
|
||||
ls = lock_stat_findnew(addr, name);
|
||||
if (!ls)
|
||||
return -1;
|
||||
if (ls->discard)
|
||||
|
@@ -559,7 +546,7 @@ report_lock_contended_event(struct trace_contended_event *contended_event,
|
|||
if (!ts)
|
||||
return -1;
|
||||
|
||||
seq = get_seq(ts, contended_event->addr);
|
||||
seq = get_seq(ts, addr);
|
||||
if (!seq)
|
||||
return -1;
|
||||
|
||||
|
@@ -592,15 +579,19 @@ end:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
report_lock_release_event(struct trace_release_event *release_event,
|
||||
const struct perf_sample *sample)
|
||||
static int report_lock_release_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
struct lock_stat *ls;
|
||||
struct thread_stat *ts;
|
||||
struct lock_seq_stat *seq;
|
||||
const char *name = perf_evsel__strval(evsel, sample, "name");
|
||||
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
|
||||
|
||||
ls = lock_stat_findnew(release_event->addr, release_event->name);
|
||||
memcpy(&addr, &tmp, sizeof(void *));
|
||||
|
||||
ls = lock_stat_findnew(addr, name);
|
||||
if (!ls)
|
||||
return -1;
|
||||
if (ls->discard)
|
||||
|
@@ -610,7 +601,7 @@ report_lock_release_event(struct trace_release_event *release_event,
|
|||
if (!ts)
|
||||
return -1;
|
||||
|
||||
seq = get_seq(ts, release_event->addr);
|
||||
seq = get_seq(ts, addr);
|
||||
if (!seq)
|
||||
return -1;
|
||||
|
||||
|
@@ -663,96 +654,33 @@ static struct trace_lock_handler *trace_handler;
|
|||
static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct trace_acquire_event acquire_event;
|
||||
struct event_format *event = evsel->tp_format;
|
||||
void *data = sample->raw_data;
|
||||
u64 tmp; /* this is required for casting... */
|
||||
int rc = 0;
|
||||
|
||||
tmp = raw_field_value(event, "lockdep_addr", data);
|
||||
memcpy(&acquire_event.addr, &tmp, sizeof(void *));
|
||||
acquire_event.name = (char *)raw_field_ptr(event, "name", data);
|
||||
acquire_event.flag = (int)raw_field_value(event, "flag", data);
|
||||
|
||||
if (trace_handler->acquire_event)
|
||||
rc = trace_handler->acquire_event(&acquire_event, sample);
|
||||
|
||||
return rc;
|
||||
return trace_handler->acquire_event(evsel, sample);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct trace_acquired_event acquired_event;
|
||||
struct event_format *event = evsel->tp_format;
|
||||
void *data = sample->raw_data;
|
||||
u64 tmp; /* this is required for casting... */
|
||||
int rc = 0;
|
||||
|
||||
tmp = raw_field_value(event, "lockdep_addr", data);
|
||||
memcpy(&acquired_event.addr, &tmp, sizeof(void *));
|
||||
acquired_event.name = (char *)raw_field_ptr(event, "name", data);
|
||||
|
||||
if (trace_handler->acquired_event)
|
||||
rc = trace_handler->acquired_event(&acquired_event, sample);
|
||||
|
||||
return rc;
|
||||
return trace_handler->acquired_event(evsel, sample);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct trace_contended_event contended_event;
|
||||
struct event_format *event = evsel->tp_format;
|
||||
void *data = sample->raw_data;
|
||||
u64 tmp; /* this is required for casting... */
|
||||
int rc = 0;
|
||||
|
||||
tmp = raw_field_value(event, "lockdep_addr", data);
|
||||
memcpy(&contended_event.addr, &tmp, sizeof(void *));
|
||||
contended_event.name = (char *)raw_field_ptr(event, "name", data);
|
||||
|
||||
if (trace_handler->contended_event)
|
||||
rc = trace_handler->contended_event(&contended_event, sample);
|
||||
|
||||
return rc;
|
||||
return trace_handler->contended_event(evsel, sample);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct trace_release_event release_event;
|
||||
struct event_format *event = evsel->tp_format;
|
||||
void *data = sample->raw_data;
|
||||
u64 tmp; /* this is required for casting... */
|
||||
int rc = 0;
|
||||
|
||||
tmp = raw_field_value(event, "lockdep_addr", data);
|
||||
memcpy(&release_event.addr, &tmp, sizeof(void *));
|
||||
release_event.name = (char *)raw_field_ptr(event, "name", data);
|
||||
|
||||
if (trace_handler->release_event)
|
||||
rc = trace_handler->release_event(&release_event, sample);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_event(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct event_format *event = evsel->tp_format;
|
||||
int rc = 0;
|
||||
|
||||
if (!strcmp(event->name, "lock_acquire"))
|
||||
rc = perf_evsel__process_lock_acquire(evsel, sample);
|
||||
if (!strcmp(event->name, "lock_acquired"))
|
||||
rc = perf_evsel__process_lock_acquired(evsel, sample);
|
||||
if (!strcmp(event->name, "lock_contended"))
|
||||
rc = perf_evsel__process_lock_contended(evsel, sample);
|
||||
if (!strcmp(event->name, "lock_release"))
|
||||
rc = perf_evsel__process_lock_release(evsel, sample);
|
||||
|
||||
return rc;
|
||||
return trace_handler->release_event(evsel, sample);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void print_bad_events(int bad, int total)
|
||||
|
@@ -870,6 +798,9 @@ static int dump_info(void)
|
|||
return rc;
|
||||
}
|
||||
|
||||
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
|
@@ -884,7 +815,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
|||
return -1;
|
||||
}
|
||||
|
||||
return perf_evsel__process_lock_event(evsel, sample);
|
||||
if (evsel->handler.func != NULL) {
|
||||
tracepoint_handler f = evsel->handler.func;
|
||||
return f(evsel, sample);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct perf_tool eops = {
|
||||
|
@@ -893,6 +829,13 @@ static struct perf_tool eops = {
|
|||
.ordered_samples = true,
|
||||
};
|
||||
|
||||
static const struct perf_evsel_str_handler lock_tracepoints[] = {
|
||||
{ "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
|
||||
{ "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
|
||||
{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
|
||||
{ "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
|
||||
};
|
||||
|
||||
static int read_events(void)
|
||||
{
|
||||
session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
|
||||
|
@@ -901,6 +844,11 @@ static int read_events(void)
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
|
||||
pr_err("Initializing perf session tracepoint handlers failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return perf_session__process_events(session, &eops);
|
||||
}
|
||||
|
||||
|
@@ -967,13 +915,6 @@ static const struct option lock_options[] = {
|
|||
OPT_END()
|
||||
};
|
||||
|
||||
static const char * const lock_tracepoints[] = {
|
||||
"lock:lock_acquire", /* CONFIG_LOCKDEP */
|
||||
"lock:lock_acquired", /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
|
||||
"lock:lock_contended", /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
|
||||
"lock:lock_release", /* CONFIG_LOCKDEP */
|
||||
};
|
||||
|
||||
static const char *record_args[] = {
|
||||
"record",
|
||||
"-R",
|
||||
|
@@ -988,10 +929,10 @@ static int __cmd_record(int argc, const char **argv)
|
|||
const char **rec_argv;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
|
||||
if (!is_valid_tracepoint(lock_tracepoints[i])) {
|
||||
if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
|
||||
pr_err("tracepoint %s is not enabled. "
|
||||
"Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
|
||||
lock_tracepoints[i]);
|
||||
lock_tracepoints[i].name);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@@ -1009,7 +950,7 @@ static int __cmd_record(int argc, const char **argv)
|
|||
|
||||
for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
|
||||
rec_argv[i++] = "-e";
|
||||
rec_argv[i++] = strdup(lock_tracepoints[j]);
|
||||
rec_argv[i++] = strdup(lock_tracepoints[j].name);
|
||||
}
|
||||
|
||||
for (j = 1; j < (unsigned int)argc; j++, i++)
|
||||
@@ -297,8 +297,10 @@ try_again:
|
|||
}
|
||||
|
||||
printf("\n");
|
||||
error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
|
||||
err, strerror(err));
|
||||
error("sys_perf_event_open() syscall returned with %d "
|
||||
"(%s) for event %s. /bin/dmesg may provide "
|
||||
"additional information.\n",
|
||||
err, strerror(err), perf_evsel__name(pos));
|
||||
|
||||
#if defined(__i386__) || defined(__x86_64__)
|
||||
if (attr->type == PERF_TYPE_HARDWARE &&
|
||||
@@ -14,6 +14,7 @@
|
|||
#include "util/symbol.h"
|
||||
#include "util/thread_map.h"
|
||||
#include "util/pmu.h"
|
||||
#include "event-parse.h"
|
||||
#include "../../include/linux/hw_breakpoint.h"
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
@@ -1207,6 +1208,87 @@ static int perf_evsel__roundtrip_name_test(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
|
||||
int size, bool should_be_signed)
|
||||
{
|
||||
struct format_field *field = perf_evsel__field(evsel, name);
|
||||
int is_signed;
|
||||
int ret = 0;
|
||||
|
||||
if (field == NULL) {
|
||||
pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
is_signed = !!(field->flags | FIELD_IS_SIGNED);
|
||||
if (should_be_signed && !is_signed) {
|
||||
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
|
||||
evsel->name, name, is_signed, should_be_signed);
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
if (field->size != size) {
|
||||
pr_debug("%s: \"%s\" size (%d) should be %d!\n",
|
||||
evsel->name, name, field->size, size);
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__tp_sched_test(void)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
|
||||
int ret = 0;
|
||||
|
||||
if (evsel == NULL) {
|
||||
pr_debug("perf_evsel__new\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "prev_state", 8, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "next_comm", 16, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "next_prio", 4, true))
|
||||
ret = -1;
|
||||
|
||||
perf_evsel__delete(evsel);
|
||||
|
||||
evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
|
||||
|
||||
if (perf_evsel__test_field(evsel, "comm", 16, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "pid", 4, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "prio", 4, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "success", 4, true))
|
||||
ret = -1;
|
||||
|
||||
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
|
||||
ret = -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct test {
|
||||
const char *desc;
|
||||
int (*func)(void);
|
||||
|
@@ -1253,6 +1335,10 @@ static struct test {
|
|||
.desc = "roundtrip evsel->name check",
|
||||
.func = perf_evsel__roundtrip_name_test,
|
||||
},
|
||||
{
|
||||
.desc = "Check parsing of sched tracepoints fields",
|
||||
.func = perf_evsel__tp_sched_test,
|
||||
},
|
||||
{
|
||||
.func = NULL,
|
||||
},
|
||||
@@ -168,9 +168,8 @@ static struct per_pid *find_create_pid(int pid)
|
|||
return cursor;
|
||||
cursor = cursor->next;
|
||||
}
|
||||
cursor = malloc(sizeof(struct per_pid));
|
||||
cursor = zalloc(sizeof(*cursor));
|
||||
assert(cursor != NULL);
|
||||
memset(cursor, 0, sizeof(struct per_pid));
|
||||
cursor->pid = pid;
|
||||
cursor->next = all_data;
|
||||
all_data = cursor;
|
||||
|
@@ -195,9 +194,8 @@ static void pid_set_comm(int pid, char *comm)
|
|||
}
|
||||
c = c->next;
|
||||
}
|
||||
c = malloc(sizeof(struct per_pidcomm));
|
||||
c = zalloc(sizeof(*c));
|
||||
assert(c != NULL);
|
||||
memset(c, 0, sizeof(struct per_pidcomm));
|
||||
c->comm = strdup(comm);
|
||||
p->current = c;
|
||||
c->next = p->all;
|
||||
|
@@ -239,17 +237,15 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
|
|||
p = find_create_pid(pid);
|
||||
c = p->current;
|
||||
if (!c) {
|
||||
c = malloc(sizeof(struct per_pidcomm));
|
||||
c = zalloc(sizeof(*c));
|
||||
assert(c != NULL);
|
||||
memset(c, 0, sizeof(struct per_pidcomm));
|
||||
p->current = c;
|
||||
c->next = p->all;
|
||||
p->all = c;
|
||||
}
|
||||
|
||||
sample = malloc(sizeof(struct cpu_sample));
|
||||
sample = zalloc(sizeof(*sample));
|
||||
assert(sample != NULL);
|
||||
memset(sample, 0, sizeof(struct cpu_sample));
|
||||
sample->start_time = start;
|
||||
sample->end_time = end;
|
||||
sample->type = type;
|
||||
|
@@ -373,11 +369,10 @@ static void c_state_start(int cpu, u64 timestamp, int state)
|
|||
|
||||
static void c_state_end(int cpu, u64 timestamp)
|
||||
{
|
||||
struct power_event *pwr;
|
||||
pwr = malloc(sizeof(struct power_event));
|
||||
struct power_event *pwr = zalloc(sizeof(*pwr));
|
||||
|
||||
if (!pwr)
|
||||
return;
|
||||
memset(pwr, 0, sizeof(struct power_event));
|
||||
|
||||
pwr->state = cpus_cstate_state[cpu];
|
||||
pwr->start_time = cpus_cstate_start_times[cpu];
|
||||
|
@@ -392,14 +387,13 @@ static void c_state_end(int cpu, u64 timestamp)
|
|||
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
|
||||
{
|
||||
struct power_event *pwr;
|
||||
pwr = malloc(sizeof(struct power_event));
|
||||
|
||||
if (new_freq > 8000000) /* detect invalid data */
|
||||
return;
|
||||
|
||||
pwr = zalloc(sizeof(*pwr));
|
||||
if (!pwr)
|
||||
return;
|
||||
memset(pwr, 0, sizeof(struct power_event));
|
||||
|
||||
pwr->state = cpus_pstate_state[cpu];
|
||||
pwr->start_time = cpus_pstate_start_times[cpu];
|
||||
|
@@ -429,15 +423,13 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
|
|||
static void
|
||||
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
|
||||
{
|
||||
struct wake_event *we;
|
||||
struct per_pid *p;
|
||||
struct wakeup_entry *wake = (void *)te;
|
||||
struct wake_event *we = zalloc(sizeof(*we));
|
||||
|
||||
we = malloc(sizeof(struct wake_event));
|
||||
if (!we)
|
||||
return;
|
||||
|
||||
memset(we, 0, sizeof(struct wake_event));
|
||||
we->time = timestamp;
|
||||
we->waker = pid;
|
||||
|
||||
|
@@ -579,13 +571,12 @@ static void end_sample_processing(void)
|
|||
struct power_event *pwr;
|
||||
|
||||
for (cpu = 0; cpu <= numcpus; cpu++) {
|
||||
pwr = malloc(sizeof(struct power_event));
|
||||
if (!pwr)
|
||||
return;
|
||||
memset(pwr, 0, sizeof(struct power_event));
|
||||
|
||||
/* C state */
|
||||
#if 0
|
||||
pwr = zalloc(sizeof(*pwr));
|
||||
if (!pwr)
|
||||
return;
|
||||
|
||||
pwr->state = cpus_cstate_state[cpu];
|
||||
pwr->start_time = cpus_cstate_start_times[cpu];
|
||||
pwr->end_time = last_time;
|
||||
|
@@ -597,10 +588,9 @@ static void end_sample_processing(void)
|
|||
#endif
|
||||
/* P state */
|
||||
|
||||
pwr = malloc(sizeof(struct power_event));
|
||||
pwr = zalloc(sizeof(*pwr));
|
||||
if (!pwr)
|
||||
return;
|
||||
memset(pwr, 0, sizeof(struct power_event));
|
||||
|
||||
pwr->state = cpus_pstate_state[cpu];
|
||||
pwr->start_time = cpus_pstate_start_times[cpu];
|
||||
|
@@ -830,11 +820,9 @@ static void draw_process_bars(void)
|
|||
|
||||
static void add_process_filter(const char *string)
|
||||
{
|
||||
struct process_filter *filt;
|
||||
int pid;
|
||||
int pid = strtoull(string, NULL, 10);
|
||||
struct process_filter *filt = malloc(sizeof(*filt));
|
||||
|
||||
pid = strtoull(string, NULL, 10);
|
||||
filt = malloc(sizeof(struct process_filter));
|
||||
if (!filt)
|
||||
return;
|
||||
|
||||
@@ -10,6 +10,7 @@
|
|||
#include <byteswap.h>
|
||||
#include <linux/bitops.h>
|
||||
#include "asm/bug.h"
|
||||
#include "debugfs.h"
|
||||
#include "event-parse.h"
|
||||
#include "evsel.h"
|
||||
#include "evlist.h"
|
||||
|
@@ -69,6 +70,72 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
|
|||
return evsel;
|
||||
}
|
||||
|
||||
static struct event_format *event_format__new(const char *sys, const char *name)
|
||||
{
|
||||
int fd, n;
|
||||
char *filename;
|
||||
void *bf = NULL, *nbf;
|
||||
size_t size = 0, alloc_size = 0;
|
||||
struct event_format *format = NULL;
|
||||
|
||||
if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
|
||||
goto out;
|
||||
|
||||
fd = open(filename, O_RDONLY);
|
||||
if (fd < 0)
|
||||
goto out_free_filename;
|
||||
|
||||
do {
|
||||
if (size == alloc_size) {
|
||||
alloc_size += BUFSIZ;
|
||||
nbf = realloc(bf, alloc_size);
|
||||
if (nbf == NULL)
|
||||
goto out_free_bf;
|
||||
bf = nbf;
|
||||
}
|
||||
|
||||
n = read(fd, bf + size, BUFSIZ);
|
||||
if (n < 0)
|
||||
goto out_free_bf;
|
||||
size += n;
|
||||
} while (n > 0);
|
||||
|
||||
pevent_parse_format(&format, bf, size, sys);
|
||||
|
||||
out_free_bf:
|
||||
free(bf);
|
||||
close(fd);
|
||||
out_free_filename:
|
||||
free(filename);
|
||||
out:
|
||||
return format;
|
||||
}
|
||||
|
||||
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
|
||||
{
|
||||
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
|
||||
|
||||
if (evsel != NULL) {
|
||||
struct perf_event_attr attr = {
|
||||
.type = PERF_TYPE_TRACEPOINT,
|
||||
};
|
||||
|
||||
evsel->tp_format = event_format__new(sys, name);
|
||||
if (evsel->tp_format == NULL)
|
||||
goto out_free;
|
||||
|
||||
attr.config = evsel->tp_format->id;
|
||||
perf_evsel__init(evsel, &attr, idx);
|
||||
evsel->name = evsel->tp_format->name;
|
||||
}
|
||||
|
||||
return evsel;
|
||||
|
||||
out_free:
|
||||
free(evsel);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
|
||||
"cycles",
|
||||
"instructions",
|
||||
|
@@ -495,6 +562,10 @@ void perf_evsel__delete(struct perf_evsel *evsel)
|
|||
perf_evsel__exit(evsel);
|
||||
close_cgroup(evsel->cgrp);
|
||||
free(evsel->group_name);
|
||||
if (evsel->tp_format && evsel->name == evsel->tp_format->name) {
|
||||
evsel->name = NULL;
|
||||
pevent_free_format(evsel->tp_format);
|
||||
}
|
||||
free(evsel->name);
|
||||
free(evsel);
|
||||
}
|
||||
|
@@ -1002,14 +1073,19 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
|
||||
{
|
||||
return pevent_find_field(evsel->tp_format, name);
|
||||
}
|
||||
|
||||
char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
const char *name)
|
||||
{
|
||||
struct format_field *field = pevent_find_field(evsel->tp_format, name);
|
||||
struct format_field *field = perf_evsel__field(evsel, name);
|
||||
int offset;
|
||||
|
||||
if (!field)
|
||||
return NULL;
|
||||
if (!field)
|
||||
return NULL;
|
||||
|
||||
offset = field->offset;
|
||||
|
||||
|
@@ -1024,11 +1100,11 @@ char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample,
|
|||
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
const char *name)
|
||||
{
|
||||
struct format_field *field = pevent_find_field(evsel->tp_format, name);
|
||||
struct format_field *field = perf_evsel__field(evsel, name);
|
||||
u64 val;
|
||||
|
||||
if (!field)
|
||||
return 0;
|
||||
if (!field)
|
||||
return 0;
|
||||
|
||||
val = pevent_read_number(evsel->tp_format->pevent,
|
||||
sample->raw_data + field->offset, field->size);
|
||||
@@ -81,6 +81,7 @@ struct perf_evlist;
|
|||
struct perf_record_opts;
|
||||
|
||||
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
|
||||
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx);
|
||||
void perf_evsel__init(struct perf_evsel *evsel,
|
||||
struct perf_event_attr *attr, int idx);
|
||||
void perf_evsel__exit(struct perf_evsel *evsel);
|
||||
|
@@ -128,6 +129,10 @@ char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample,
|
|||
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
const char *name);
|
||||
|
||||
struct format_field;
|
||||
|
||||
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
|
||||
|
||||
#define perf_evsel__match(evsel, t, c) \
|
||||
(evsel->attr.type == PERF_TYPE_##t && \
|
||||
evsel->attr.config == PERF_COUNT_##c)
|
||||
@@ -22,6 +22,7 @@
|
|||
#include "cpumap.h"
|
||||
#include "pmu.h"
|
||||
#include "vdso.h"
|
||||
#include "strbuf.h"
|
||||
|
||||
static bool no_buildid_cache = false;
|
||||
|
||||
|
@@ -1102,118 +1103,80 @@ static int write_branch_stack(int fd __maybe_unused,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
char *str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# hostname : %s\n", str);
|
||||
free(str);
|
||||
fprintf(fp, "# hostname : %s\n", ph->env.hostname);
|
||||
}
|
||||
|
||||
static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
char *str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# os release : %s\n", str);
|
||||
free(str);
|
||||
fprintf(fp, "# os release : %s\n", ph->env.os_release);
|
||||
}
|
||||
|
||||
static void print_arch(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
|
||||
{
|
||||
char *str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# arch : %s\n", str);
|
||||
free(str);
|
||||
fprintf(fp, "# arch : %s\n", ph->env.arch);
|
||||
}
|
||||
|
||||
static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
char *str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# cpudesc : %s\n", str);
|
||||
free(str);
|
||||
fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
|
||||
}
|
||||
|
||||
static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
ssize_t ret;
|
||||
u32 nr;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != (ssize_t)sizeof(nr))
|
||||
nr = -1; /* interpreted as error */
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
fprintf(fp, "# nrcpus online : %u\n", nr);
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != (ssize_t)sizeof(nr))
|
||||
nr = -1; /* interpreted as error */
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
fprintf(fp, "# nrcpus avail : %u\n", nr);
|
||||
fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
|
||||
fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
|
||||
}
|
||||
|
||||
static void print_version(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_version(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
char *str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# perf version : %s\n", str);
|
||||
free(str);
|
||||
fprintf(fp, "# perf version : %s\n", ph->env.version);
|
||||
}
|
||||
|
||||
static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
ssize_t ret;
|
||||
int nr, i;
|
||||
char *str;
|
||||
u32 nr, i;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != (ssize_t)sizeof(nr))
|
||||
return;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
nr = ph->env.nr_cmdline;
|
||||
str = ph->env.cmdline;
|
||||
|
||||
fprintf(fp, "# cmdline : ");
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
str = do_read_string(fd, ph);
|
||||
fprintf(fp, "%s ", str);
|
||||
free(str);
|
||||
str += strlen(str) + 1;
|
||||
}
|
||||
fputc('\n', fp);
|
||||
}
|
||||
|
||||
static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
ssize_t ret;
|
||||
u32 nr, i;
|
||||
int nr, i;
|
||||
char *str;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != (ssize_t)sizeof(nr))
|
||||
return;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
nr = ph->env.nr_sibling_cores;
|
||||
str = ph->env.sibling_cores;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# sibling cores : %s\n", str);
|
||||
free(str);
|
||||
str += strlen(str) + 1;
|
||||
}
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != (ssize_t)sizeof(nr))
|
||||
return;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
nr = ph->env.nr_sibling_threads;
|
||||
str = ph->env.sibling_threads;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# sibling threads : %s\n", str);
|
||||
free(str);
|
||||
str += strlen(str) + 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1374,126 +1337,89 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
|
|||
free_event_desc(events);
|
||||
}
|
||||
|
||||
static void print_total_mem(struct perf_header *h __maybe_unused, int fd,
|
||||
static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
uint64_t mem;
|
||||
ssize_t ret;
|
||||
|
||||
ret = read(fd, &mem, sizeof(mem));
|
||||
if (ret != sizeof(mem))
|
||||
goto error;
|
||||
|
||||
if (h->needs_swap)
|
||||
mem = bswap_64(mem);
|
||||
|
||||
fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
|
||||
return;
|
||||
error:
|
||||
fprintf(fp, "# total memory : unknown\n");
|
||||
fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
|
||||
}
|
||||
|
||||
static void print_numa_topology(struct perf_header *h __maybe_unused, int fd,
|
||||
static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
ssize_t ret;
|
||||
u32 nr, c, i;
|
||||
char *str;
|
||||
char *str, *tmp;
|
||||
uint64_t mem_total, mem_free;
|
||||
|
||||
/* nr nodes */
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != (ssize_t)sizeof(nr))
|
||||
goto error;
|
||||
|
||||
if (h->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
nr = ph->env.nr_numa_nodes;
|
||||
str = ph->env.numa_nodes;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
|
||||
/* node number */
|
||||
ret = read(fd, &c, sizeof(c));
|
||||
if (ret != (ssize_t)sizeof(c))
|
||||
c = strtoul(str, &tmp, 0);
|
||||
if (*tmp != ':')
|
||||
goto error;
|
||||
|
||||
if (h->needs_swap)
|
||||
c = bswap_32(c);
|
||||
|
||||
ret = read(fd, &mem_total, sizeof(u64));
|
||||
if (ret != sizeof(u64))
|
||||
str = tmp + 1;
|
||||
mem_total = strtoull(str, &tmp, 0);
|
||||
if (*tmp != ':')
|
||||
goto error;
|
||||
|
||||
ret = read(fd, &mem_free, sizeof(u64));
|
||||
if (ret != sizeof(u64))
|
||||
str = tmp + 1;
|
||||
mem_free = strtoull(str, &tmp, 0);
|
||||
if (*tmp != ':')
|
||||
goto error;
|
||||
|
||||
if (h->needs_swap) {
|
||||
mem_total = bswap_64(mem_total);
|
||||
mem_free = bswap_64(mem_free);
|
||||
}
|
||||
|
||||
fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
|
||||
" free = %"PRIu64" kB\n",
|
||||
c,
|
||||
mem_total,
|
||||
mem_free);
|
||||
c, mem_total, mem_free);
|
||||
|
||||
str = do_read_string(fd, h);
|
||||
str = tmp + 1;
|
||||
fprintf(fp, "# node%u cpu list : %s\n", c, str);
|
||||
free(str);
|
||||
}
|
||||
return;
|
||||
error:
|
||||
fprintf(fp, "# numa topology : not available\n");
|
||||
}
|
||||
|
||||
static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
|
||||
{
|
||||
char *str = do_read_string(fd, ph);
|
||||
fprintf(fp, "# cpuid : %s\n", str);
|
||||
free(str);
|
||||
fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
|
||||
}
|
||||
|
||||
static void print_branch_stack(struct perf_header *ph __maybe_unused,
|
||||
int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
int fd __maybe_unused, FILE *fp)
|
||||
{
|
||||
fprintf(fp, "# contains samples with branch stack\n");
|
||||
}
|
||||
|
||||
static void print_pmu_mappings(struct perf_header *ph, int fd, FILE *fp)
|
||||
static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
|
||||
FILE *fp)
|
||||
{
|
||||
const char *delimiter = "# pmu mappings: ";
|
||||
char *name;
|
||||
int ret;
|
||||
char *str, *tmp;
|
||||
u32 pmu_num;
|
||||
u32 type;
|
||||
|
||||
ret = read(fd, &pmu_num, sizeof(pmu_num));
|
||||
if (ret != sizeof(pmu_num))
|
||||
goto error;
|
||||
|
||||
if (ph->needs_swap)
|
||||
pmu_num = bswap_32(pmu_num);
|
||||
|
||||
pmu_num = ph->env.nr_pmu_mappings;
|
||||
if (!pmu_num) {
|
||||
fprintf(fp, "# pmu mappings: not available\n");
|
||||
return;
|
||||
}
|
||||
|
||||
while (pmu_num) {
|
||||
if (read(fd, &type, sizeof(type)) != sizeof(type))
|
||||
break;
|
||||
if (ph->needs_swap)
|
||||
type = bswap_32(type);
|
||||
str = ph->env.pmu_mappings;
|
||||
|
||||
while (pmu_num) {
|
||||
type = strtoul(str, &tmp, 0);
|
||||
if (*tmp != ':')
|
||||
goto error;
|
||||
|
||||
str = tmp + 1;
|
||||
fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
|
||||
|
||||
name = do_read_string(fd, ph);
|
||||
if (!name)
|
||||
break;
|
||||
pmu_num--;
|
||||
fprintf(fp, "%s%s = %" PRIu32, delimiter, name, type);
|
||||
free(name);
|
||||
delimiter = ", ";
|
||||
str += strlen(str) + 1;
|
||||
pmu_num--;
|
||||
}
|
||||
|
||||
fprintf(fp, "\n");
|
||||
|
@@ -1654,18 +1580,16 @@ out:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int process_tracing_data(struct perf_file_section *section
|
||||
__maybe_unused,
|
||||
struct perf_header *ph __maybe_unused,
|
||||
int feat __maybe_unused, int fd, void *data)
|
||||
static int process_tracing_data(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph __maybe_unused,
|
||||
int fd, void *data)
|
||||
{
|
||||
trace_report(fd, data, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_build_id(struct perf_file_section *section,
|
||||
struct perf_header *ph,
|
||||
int feat __maybe_unused, int fd,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
|
||||
|
@@ -1673,6 +1597,99 @@ static int process_build_id(struct perf_file_section *section,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int process_hostname(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
ph->env.hostname = do_read_string(fd, ph);
|
||||
return ph->env.hostname ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int process_osrelease(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
ph->env.os_release = do_read_string(fd, ph);
|
||||
return ph->env.os_release ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int process_version(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
ph->env.version = do_read_string(fd, ph);
|
||||
return ph->env.version ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int process_arch(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
ph->env.arch = do_read_string(fd, ph);
|
||||
return ph->env.arch ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int process_nrcpus(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
size_t ret;
|
||||
u32 nr;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != sizeof(nr))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
ph->env.nr_cpus_online = nr;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != sizeof(nr))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
ph->env.nr_cpus_avail = nr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_cpudesc(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
ph->env.cpu_desc = do_read_string(fd, ph);
|
||||
return ph->env.cpu_desc ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int process_cpuid(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
ph->env.cpuid = do_read_string(fd, ph);
|
||||
return ph->env.cpuid ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int process_total_mem(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
uint64_t mem;
|
||||
size_t ret;
|
||||
|
||||
ret = read(fd, &mem, sizeof(mem));
|
||||
if (ret != sizeof(mem))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
mem = bswap_64(mem);
|
||||
|
||||
ph->env.total_mem = mem;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct perf_evsel *
|
||||
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
|
||||
{
|
||||
|
@@ -1687,7 +1704,8 @@ perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
|
|||
}
|
||||
|
||||
static void
|
||||
perf_evlist__set_event_name(struct perf_evlist *evlist, struct perf_evsel *event)
|
||||
perf_evlist__set_event_name(struct perf_evlist *evlist,
|
||||
struct perf_evsel *event)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
|
@@ -1706,15 +1724,16 @@ perf_evlist__set_event_name(struct perf_evlist *evlist, struct perf_evsel *event
|
|||
|
||||
static int
|
||||
process_event_desc(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *header, int feat __maybe_unused, int fd,
|
||||
struct perf_header *header, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
struct perf_session *session = container_of(header, struct perf_session, header);
|
||||
struct perf_session *session;
|
||||
struct perf_evsel *evsel, *events = read_event_desc(header, fd);
|
||||
|
||||
if (!events)
|
||||
return 0;
|
||||
|
||||
session = container_of(header, struct perf_session, header);
|
||||
for (evsel = events; evsel->attr.size; evsel++)
|
||||
perf_evlist__set_event_name(session->evlist, evsel);
|
||||
|
||||
|
@@ -1723,11 +1742,213 @@ process_event_desc(struct perf_file_section *section __maybe_unused,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int process_cmdline(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
size_t ret;
|
||||
char *str;
|
||||
u32 nr, i;
|
||||
struct strbuf sb;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != sizeof(nr))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
ph->env.nr_cmdline = nr;
|
||||
strbuf_init(&sb, 128);
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
str = do_read_string(fd, ph);
|
||||
if (!str)
|
||||
goto error;
|
||||
|
||||
/* include a NULL character at the end */
|
||||
strbuf_add(&sb, str, strlen(str) + 1);
|
||||
free(str);
|
||||
}
|
||||
ph->env.cmdline = strbuf_detach(&sb, NULL);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
strbuf_release(&sb);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
size_t ret;
|
||||
u32 nr, i;
|
||||
char *str;
|
||||
struct strbuf sb;
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != sizeof(nr))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
ph->env.nr_sibling_cores = nr;
|
||||
strbuf_init(&sb, 128);
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
str = do_read_string(fd, ph);
|
||||
if (!str)
|
||||
goto error;
|
||||
|
||||
/* include a NULL character at the end */
|
||||
strbuf_add(&sb, str, strlen(str) + 1);
|
||||
free(str);
|
||||
}
|
||||
ph->env.sibling_cores = strbuf_detach(&sb, NULL);
|
||||
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != sizeof(nr))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
ph->env.nr_sibling_threads = nr;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
str = do_read_string(fd, ph);
|
||||
if (!str)
|
||||
goto error;
|
||||
|
||||
/* include a NULL character at the end */
|
||||
strbuf_add(&sb, str, strlen(str) + 1);
|
||||
free(str);
|
||||
}
|
||||
ph->env.sibling_threads = strbuf_detach(&sb, NULL);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
strbuf_release(&sb);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int process_numa_topology(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
size_t ret;
|
||||
u32 nr, node, i;
|
||||
char *str;
|
||||
uint64_t mem_total, mem_free;
|
||||
struct strbuf sb;
|
||||
|
||||
/* nr nodes */
|
||||
ret = read(fd, &nr, sizeof(nr));
|
||||
if (ret != sizeof(nr))
|
||||
goto error;
|
||||
|
||||
if (ph->needs_swap)
|
||||
nr = bswap_32(nr);
|
||||
|
||||
ph->env.nr_numa_nodes = nr;
|
||||
strbuf_init(&sb, 256);
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
/* node number */
|
||||
ret = read(fd, &node, sizeof(node));
|
||||
if (ret != sizeof(node))
|
||||
goto error;
|
||||
|
||||
ret = read(fd, &mem_total, sizeof(u64));
|
||||
if (ret != sizeof(u64))
|
||||
goto error;
|
||||
|
||||
ret = read(fd, &mem_free, sizeof(u64));
|
||||
if (ret != sizeof(u64))
|
||||
goto error;
|
||||
|
||||
if (ph->needs_swap) {
|
||||
node = bswap_32(node);
|
||||
mem_total = bswap_64(mem_total);
|
||||
mem_free = bswap_64(mem_free);
|
||||
}
|
||||
|
||||
strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
|
||||
node, mem_total, mem_free);
|
||||
|
||||
str = do_read_string(fd, ph);
|
||||
if (!str)
|
||||
goto error;
|
||||
|
||||
/* include a NULL character at the end */
|
||||
strbuf_add(&sb, str, strlen(str) + 1);
|
||||
free(str);
|
||||
}
|
||||
ph->env.numa_nodes = strbuf_detach(&sb, NULL);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
strbuf_release(&sb);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
|
||||
struct perf_header *ph, int fd,
|
||||
void *data __maybe_unused)
|
||||
{
|
||||
size_t ret;
|
||||
char *name;
|
||||
u32 pmu_num;
|
||||
u32 type;
|
||||
struct strbuf sb;
|
||||
|
||||
ret = read(fd, &pmu_num, sizeof(pmu_num));
|
||||
if (ret != sizeof(pmu_num))
|
||||
return -1;
|
||||
|
||||
if (ph->needs_swap)
|
||||
pmu_num = bswap_32(pmu_num);
|
||||
|
||||
if (!pmu_num) {
|
||||
pr_debug("pmu mappings not available\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
ph->env.nr_pmu_mappings = pmu_num;
|
||||
strbuf_init(&sb, 128);
|
||||
|
||||
while (pmu_num) {
|
||||
if (read(fd, &type, sizeof(type)) != sizeof(type))
|
||||
goto error;
|
||||
if (ph->needs_swap)
|
||||
type = bswap_32(type);
|
||||
|
||||
name = do_read_string(fd, ph);
|
||||
if (!name)
|
||||
goto error;
|
||||
|
||||
strbuf_addf(&sb, "%u:%s", type, name);
|
||||
/* include a NULL character at the end */
|
||||
strbuf_add(&sb, "", 1);
|
||||
|
||||
free(name);
|
||||
pmu_num--;
|
||||
}
|
||||
ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
strbuf_release(&sb);
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct feature_ops {
|
||||
int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
|
||||
void (*print)(struct perf_header *h, int fd, FILE *fp);
|
||||
int (*process)(struct perf_file_section *section,
|
||||
struct perf_header *h, int feat, int fd, void *data);
|
||||
struct perf_header *h, int fd, void *data);
|
||||
const char *name;
|
||||
bool full_only;
|
||||
};
|
||||
|
@@ -1739,7 +1960,7 @@ struct feature_ops {
|
|||
.process = process_##func }
|
||||
#define FEAT_OPF(n, func) \
|
||||
[n] = { .name = #n, .write = write_##func, .print = print_##func, \
|
||||
.full_only = true }
|
||||
.process = process_##func, .full_only = true }
|
||||
|
||||
/* feature_ops not implemented: */
|
||||
#define print_tracing_data NULL
|
||||
|
@@ -1748,20 +1969,20 @@ struct feature_ops {
|
|||
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
|
||||
FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
|
||||
FEAT_OPP(HEADER_BUILD_ID, build_id),
|
||||
FEAT_OPA(HEADER_HOSTNAME, hostname),
|
||||
FEAT_OPA(HEADER_OSRELEASE, osrelease),
|
||||
FEAT_OPA(HEADER_VERSION, version),
|
||||
FEAT_OPA(HEADER_ARCH, arch),
|
||||
FEAT_OPA(HEADER_NRCPUS, nrcpus),
|
||||
FEAT_OPA(HEADER_CPUDESC, cpudesc),
|
||||
FEAT_OPA(HEADER_CPUID, cpuid),
|
||||
FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
|
||||
FEAT_OPP(HEADER_HOSTNAME, hostname),
|
||||
FEAT_OPP(HEADER_OSRELEASE, osrelease),
|
||||
FEAT_OPP(HEADER_VERSION, version),
|
||||
FEAT_OPP(HEADER_ARCH, arch),
|
||||
FEAT_OPP(HEADER_NRCPUS, nrcpus),
|
||||
FEAT_OPP(HEADER_CPUDESC, cpudesc),
|
||||
FEAT_OPP(HEADER_CPUID, cpuid),
|
||||
FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
|
||||
FEAT_OPP(HEADER_EVENT_DESC, event_desc),
|
||||
FEAT_OPA(HEADER_CMDLINE, cmdline),
|
||||
FEAT_OPP(HEADER_CMDLINE, cmdline),
|
||||
FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
|
||||
FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
|
||||
FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
|
||||
FEAT_OPA(HEADER_PMU_MAPPINGS, pmu_mappings),
|
||||
FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
|
||||
};
|
||||
|
||||
struct header_print_data {
|
||||
|
@@ -2241,7 +2462,7 @@ static int perf_file_section__process(struct perf_file_section *section,
|
|||
if (!feat_ops[feat].process)
|
||||
return 0;
|
||||
|
||||
return feat_ops[feat].process(section, ph, feat, fd, data);
|
||||
return feat_ops[feat].process(section, ph, fd, data);
|
||||
}
|
||||
|
||||
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
|
||||
@@ -58,6 +58,29 @@ struct perf_header;
|
|||
int perf_file_header__read(struct perf_file_header *header,
|
||||
struct perf_header *ph, int fd);
|
||||
|
||||
struct perf_session_env {
|
||||
char *hostname;
|
||||
char *os_release;
|
||||
char *version;
|
||||
char *arch;
|
||||
int nr_cpus_online;
|
||||
int nr_cpus_avail;
|
||||
char *cpu_desc;
|
||||
char *cpuid;
|
||||
unsigned long long total_mem;
|
||||
|
||||
int nr_cmdline;
|
||||
char *cmdline;
|
||||
int nr_sibling_cores;
|
||||
char *sibling_cores;
|
||||
int nr_sibling_threads;
|
||||
char *sibling_threads;
|
||||
int nr_numa_nodes;
|
||||
char *numa_nodes;
|
||||
int nr_pmu_mappings;
|
||||
char *pmu_mappings;
|
||||
};
|
||||
|
||||
struct perf_header {
|
||||
int frozen;
|
||||
bool needs_swap;
|
||||
|
@@ -67,6 +90,7 @@ struct perf_header {
|
|||
u64 event_offset;
|
||||
u64 event_size;
|
||||
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
|
||||
struct perf_session_env env;
|
||||
};
|
||||
|
||||
struct perf_evlist;
|
||||
@@ -243,15 +243,14 @@ size_t map__fprintf(struct map *self, FILE *fp)
|
|||
|
||||
size_t map__fprintf_dsoname(struct map *map, FILE *fp)
|
||||
{
|
||||
const char *dsoname;
|
||||
const char *dsoname = "[unknown]";
|
||||
|
||||
if (map && map->dso && (map->dso->name || map->dso->long_name)) {
|
||||
if (symbol_conf.show_kernel_path && map->dso->long_name)
|
||||
dsoname = map->dso->long_name;
|
||||
else if (map->dso->name)
|
||||
dsoname = map->dso->name;
|
||||
} else
|
||||
dsoname = "[unknown]";
|
||||
}
|
||||
|
||||
return fprintf(fp, "%s", dsoname);
|
||||
}
|
||||
@@ -1100,6 +1100,7 @@ static int parse_probe_trace_command(const char *cmd,
|
|||
struct probe_trace_point *tp = &tev->point;
|
||||
char pr;
|
||||
char *p;
|
||||
char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str;
|
||||
int ret, i, argc;
|
||||
char **argv;
|
||||
|
||||
|
@@ -1116,14 +1117,27 @@ static int parse_probe_trace_command(const char *cmd,
|
|||
}
|
||||
|
||||
/* Scan event and group name. */
|
||||
ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
|
||||
&pr, (float *)(void *)&tev->group,
|
||||
(float *)(void *)&tev->event);
|
||||
if (ret != 3) {
|
||||
argv0_str = strdup(argv[0]);
|
||||
if (argv0_str == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
fmt1_str = strtok_r(argv0_str, ":", &fmt);
|
||||
fmt2_str = strtok_r(NULL, "/", &fmt);
|
||||
fmt3_str = strtok_r(NULL, " \t", &fmt);
|
||||
if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL
|
||||
|| fmt3_str == NULL) {
|
||||
semantic_error("Failed to parse event name: %s\n", argv[0]);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
pr = fmt1_str[0];
|
||||
tev->group = strdup(fmt2_str);
|
||||
tev->event = strdup(fmt3_str);
|
||||
if (tev->group == NULL || tev->event == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
|
||||
|
||||
tp->retprobe = (pr == 'r');
|
||||
|
@@ -1135,10 +1149,17 @@ static int parse_probe_trace_command(const char *cmd,
|
|||
p++;
|
||||
} else
|
||||
p = argv[1];
|
||||
ret = sscanf(p, "%a[^+]+%lu", (float *)(void *)&tp->symbol,
|
||||
&tp->offset);
|
||||
if (ret == 1)
|
||||
fmt1_str = strtok_r(p, "+", &fmt);
|
||||
tp->symbol = strdup(fmt1_str);
|
||||
if (tp->symbol == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
fmt2_str = strtok_r(NULL, "", &fmt);
|
||||
if (fmt2_str == NULL)
|
||||
tp->offset = 0;
|
||||
else
|
||||
tp->offset = strtoul(fmt2_str, NULL, 10);
|
||||
|
||||
tev->nargs = argc - 2;
|
||||
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
|
||||
|
@@ -1162,6 +1183,7 @@ static int parse_probe_trace_command(const char *cmd,
|
|||
}
|
||||
ret = 0;
|
||||
out:
|
||||
free(argv0_str);
|
||||
argv_free(argv);
|
||||
return ret;
|
||||
}
|
||||
@@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
|
|||
|
||||
event = find_cache_event(evsel);
|
||||
if (!event)
|
||||
die("ug! no event found for type %d", evsel->attr.config);
|
||||
die("ug! no event found for type %" PRIu64, evsel->attr.config);
|
||||
|
||||
pid = raw_field_value(event, "common_pid", data);
|
||||
|
||||
@@ -34,6 +34,7 @@ static inline char *bfd_demangle(void __maybe_unused *v,
|
|||
return NULL;
|
||||
}
|
||||
#else
|
||||
#define PACKAGE 'perf'
|
||||
#include <bfd.h>
|
||||
#endif
|
||||
#endif
|
||||
@@ -16,6 +16,8 @@ struct thread {
|
|||
bool comm_set;
|
||||
char *comm;
|
||||
int comm_len;
|
||||
|
||||
void *priv;
|
||||
};
|
||||
|
||||
struct machine;
|
||||
@@ -229,24 +229,22 @@ void parse_proc_kallsyms(struct pevent *pevent,
|
|||
char *next = NULL;
|
||||
char *addr_str;
|
||||
char *mod;
|
||||
char ch;
|
||||
char *fmt;
|
||||
|
||||
line = strtok_r(file, "\n", &next);
|
||||
while (line) {
|
||||
mod = NULL;
|
||||
sscanf(line, "%as %c %as\t[%as",
|
||||
(float *)(void *)&addr_str, /* workaround gcc warning */
|
||||
&ch, (float *)(void *)&func, (float *)(void *)&mod);
|
||||
addr_str = strtok_r(line, " ", &fmt);
|
||||
addr = strtoull(addr_str, NULL, 16);
|
||||
free(addr_str);
|
||||
|
||||
/* truncate the extra ']' */
|
||||
/* skip character */
|
||||
strtok_r(NULL, " ", &fmt);
|
||||
func = strtok_r(NULL, "\t", &fmt);
|
||||
mod = strtok_r(NULL, "]", &fmt);
|
||||
/* truncate the extra '[' */
|
||||
if (mod)
|
||||
mod[strlen(mod) - 1] = 0;
|
||||
mod = mod + 1;
|
||||
|
||||
pevent_register_function(pevent, func, addr, mod);
|
||||
free(func);
|
||||
free(mod);
|
||||
|
||||
line = strtok_r(NULL, "\n", &next);
|
||||
}
|