/*
 * Interface for configuring and controlling the state of tracing events.
 *
 * Copyright (C) 2014-2017 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "cpu.h"
|
2020-02-04 12:20:10 +01:00
|
|
|
#include "trace/trace-root.h"
|
2016-07-11 12:53:41 +02:00
|
|
|
#include "trace/control.h"
|
|
|
|
|
|
|
|
|
2016-08-23 10:58:52 +02:00
|
|
|
void trace_event_set_state_dynamic_init(TraceEvent *ev, bool state)
{
    bool was_enabled;

    assert(trace_event_get_state_static(ev));

    /*
     * We ignore the "vcpu" property here, since no vCPUs have been created
     * yet.  Then dstate can only be 1 or 0.
     */
    was_enabled = *ev->dstate;
    if (was_enabled == state) {
        /* Nothing to do: the event is already in the requested state. */
        return;
    }

    if (state) {
        trace_events_enabled_count++;
        *ev->dstate = 1;
    } else {
        trace_events_enabled_count--;
        *ev->dstate = 0;
    }
}
|
|
|
|
|
2016-07-11 12:53:41 +02:00
|
|
|
/*
 * Set the dynamic state of an event.
 *
 * For "vcpu" events with at least one vCPU already created, the change is
 * propagated to every vCPU.  Otherwise the global early-init path is taken.
 */
void trace_event_set_state_dynamic(TraceEvent *ev, bool state)
{
    CPUState *vcpu;

    assert(trace_event_get_state_static(ev));

    if (trace_event_is_vcpu(ev) && likely(first_cpu != NULL)) {
        /* Propagate the new state to each existing vCPU. */
        CPU_FOREACH(vcpu) {
            trace_event_set_vcpu_state_dynamic(vcpu, ev, state);
        }
    } else {
        /*
         * Without the "vcpu" property, dstate can only be 1 or 0.  With it,
         * we haven't instantiated any vCPU yet, so we will set a global state
         * instead, and trace_init_vcpu will reconcile it afterwards.
         *
         * Either way the transition is exactly the early-init one, so reuse
         * it instead of duplicating the counter/dstate bookkeeping.
         */
        trace_event_set_state_dynamic_init(ev, state);
    }
}
|
|
|
|
|
2017-07-04 10:38:26 +02:00
|
|
|
/*
 * Commit a vCPU's delayed trace-state bitmap into its active one and flush
 * the vCPU's cached TB jump entries, so subsequently executed code is built
 * against the new dstate values.
 *
 * Invoked either directly or queued through async_run_on_cpu() (hence the
 * run_on_cpu_data parameter, which is unused here).
 */
static void trace_event_synchronize_vcpu_state_dynamic(
    CPUState *vcpu, run_on_cpu_data ignored)
{
    /* Publish the pending (delayed) bits as the active per-vCPU state. */
    bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
                CPU_TRACE_DSTATE_MAX_EVENTS);
    cpu_tb_jmp_cache_clear(vcpu);
}
|
|
|
|
|
2016-07-11 12:53:41 +02:00
|
|
|
/*
 * Set the dynamic state of a "vcpu" event for one particular vCPU.
 *
 * Bookkeeping performed on a state transition:
 *   - trace_events_enabled_count tracks the global number of enablements;
 *   - the per-vCPU bit is staged in trace_dstate_delayed (not applied yet);
 *   - *ev->dstate counts how many vCPUs have this event enabled.
 */
void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
                                        TraceEvent *ev, bool state)
{
    uint32_t vcpu_id;
    bool state_pre;
    assert(trace_event_get_state_static(ev));
    assert(trace_event_is_vcpu(ev));
    vcpu_id = trace_event_get_vcpu_id(ev);
    /* Compare against the *active* bitmap, not the delayed one. */
    state_pre = test_bit(vcpu_id, vcpu->trace_dstate);
    if (state_pre != state) {
        if (state) {
            trace_events_enabled_count++;
            set_bit(vcpu_id, vcpu->trace_dstate_delayed);
            (*ev->dstate)++;
        } else {
            trace_events_enabled_count--;
            clear_bit(vcpu_id, vcpu->trace_dstate_delayed);
            (*ev->dstate)--;
        }
        if (vcpu->created) {
            /*
             * Delay changes until next TB; we want all TBs to be built from a
             * single set of dstate values to ensure consistency of generated
             * tracing code.
             */
            async_run_on_cpu(vcpu, trace_event_synchronize_vcpu_state_dynamic,
                             RUN_ON_CPU_NULL);
        } else {
            /* vCPU not running yet: safe to commit the new state in place. */
            trace_event_synchronize_vcpu_state_dynamic(vcpu, RUN_ON_CPU_NULL);
        }
    }
}
|
2016-09-19 14:55:07 +02:00
|
|
|
|
2016-12-26 22:24:35 +01:00
|
|
|
static bool adding_first_cpu1(void)
|
2016-09-19 14:55:07 +02:00
|
|
|
{
|
|
|
|
CPUState *cpu;
|
|
|
|
size_t count = 0;
|
|
|
|
CPU_FOREACH(cpu) {
|
|
|
|
count++;
|
|
|
|
if (count > 1) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-12-26 22:24:35 +01:00
|
|
|
/* Locked wrapper: take the CPU list lock around adding_first_cpu1(). */
static bool adding_first_cpu(void)
{
    bool first;

    cpu_list_lock();
    first = adding_first_cpu1();
    cpu_list_unlock();

    return first;
}
|
|
|
|
|
2016-09-19 14:55:07 +02:00
|
|
|
/*
 * Initialize trace state for a newly added vCPU.
 *
 * "vcpu" events enabled before any vCPU existed carry an early-init global
 * dstate of 1; when the first vCPU appears, that global state is torn down
 * and replaced by proper per-vCPU state.  Later vCPUs simply inherit each
 * currently enabled event.
 */
void trace_init_vcpu(CPUState *vcpu)
{
    TraceEventIter iter;
    TraceEvent *ev;
    trace_event_iter_init(&iter, NULL);
    while ((ev = trace_event_iter_next(&iter)) != NULL) {
        if (trace_event_is_vcpu(ev) &&
            trace_event_get_state_static(ev) &&
            trace_event_get_state_dynamic(ev)) {
            if (adding_first_cpu()) {
                /* check preconditions */
                assert(*ev->dstate == 1);
                /* disable early-init state ... */
                *ev->dstate = 0;
                trace_events_enabled_count--;
            }
            /* ... and properly (re-)enable for this vCPU */
            trace_event_set_vcpu_state_dynamic(vcpu, ev, true);
        }
    }
    trace_guest_cpu_enter(vcpu);
}
|