2010-05-22 20:24:51 +02:00
|
|
|
/*
|
|
|
|
* Simple trace backend
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2010
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2. See
|
|
|
|
* the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <time.h>
|
2011-09-05 09:30:17 +02:00
|
|
|
#ifndef _WIN32
|
2011-02-26 19:38:39 +01:00
|
|
|
#include <signal.h>
|
|
|
|
#include <pthread.h>
|
2011-09-05 09:30:17 +02:00
|
|
|
#endif
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/timer.h"
|
2010-05-22 20:24:51 +02:00
|
|
|
#include "trace.h"
|
2011-08-31 20:31:03 +02:00
|
|
|
#include "trace/control.h"
|
2010-05-22 20:24:51 +02:00
|
|
|
|
|
|
|
/** Trace file header event ID */
|
|
|
|
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */
|
|
|
|
|
|
|
|
/** Trace file magic number */
|
|
|
|
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL
|
|
|
|
|
|
|
|
/** Trace file version number, bump if format changes */
|
2012-07-18 11:45:59 +02:00
|
|
|
#define HEADER_VERSION 2
|
2010-05-22 20:24:51 +02:00
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/** Records were dropped event ID */
|
|
|
|
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)
|
|
|
|
|
|
|
|
/** Trace record is valid */
|
|
|
|
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Trace records are written out by a dedicated thread. The thread waits for
|
|
|
|
* records to become available, writes them out, and then waits again.
|
|
|
|
*/
|
2011-09-05 09:30:17 +02:00
|
|
|
/* Protects the writeout handshake state below */
static GStaticMutex trace_lock = G_STATIC_MUTEX_INIT;
/* Signalled when new records are ready for writeout */
static GCond *trace_available_cond;
/* Signalled by the writeout thread when it has drained the buffer */
static GCond *trace_empty_cond;
/* True while records are pending; consumed by the writeout thread */
static bool trace_available;
/* False while the trace file is being switched; halts writeout */
static bool trace_writeout_enabled;
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
enum {
    /* Ring buffer capacity in bytes */
    TRACE_BUF_LEN = 4096 * 64,
    /* Kick the writeout thread once this many unwritten bytes accumulate */
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

/* Ring buffer shared between trace producers and the writeout thread */
uint8_t trace_buf[TRACE_BUF_LEN];
/* Monotonically growing producer offset; reduced modulo TRACE_BUF_LEN on use */
static unsigned int trace_idx;
/* Monotonically growing consumer offset (writeout thread only) */
static unsigned int writeout_idx;
/* Count of events dropped because the ring buffer was full */
static uint64_t dropped_events;
/* Output file; non-NULL iff tracing to file is enabled */
static FILE *trace_fp;
/* Owned by st_set_trace_file(); freed/replaced on each call */
static char *trace_file_name;
|
2010-05-22 20:24:51 +02:00
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/** Trace buffer entry */
|
|
|
|
typedef struct {
    uint64_t event; /* TraceEventID; MSB doubles as the TRACE_RECORD_VALID flag */
    uint64_t timestamp_ns;
    uint32_t length; /* in bytes, including this header */
    uint32_t reserved; /* unused */
    uint8_t arguments[]; /* variable-length event arguments */
} TraceRecord;
|
|
|
|
|
|
|
|
/* On-disk header written once at the start of a trace file */
typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic; /* HEADER_MAGIC */
    uint64_t header_version; /* HEADER_VERSION */
} TraceLogHeader;
|
2012-07-18 11:45:59 +02:00
|
|
|
|
|
|
|
|
|
|
|
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
|
|
|
|
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
|
|
|
|
|
|
|
|
static void clear_buffer_range(unsigned int idx, size_t len)
|
|
|
|
{
|
|
|
|
uint32_t num = 0;
|
|
|
|
while (num < len) {
|
|
|
|
if (idx >= TRACE_BUF_LEN) {
|
|
|
|
idx = idx % TRACE_BUF_LEN;
|
|
|
|
}
|
|
|
|
trace_buf[idx++] = 0;
|
|
|
|
num++;
|
|
|
|
}
|
|
|
|
}
|
2010-07-13 10:26:33 +02:00
|
|
|
/**
|
2011-02-26 19:38:39 +01:00
|
|
|
* Read a trace record from the trace buffer
|
|
|
|
*
|
|
|
|
* @idx Trace buffer index
|
|
|
|
* @record Trace record to fill
|
|
|
|
*
|
|
|
|
* Returns false if the record is not valid.
|
2010-07-13 10:26:33 +02:00
|
|
|
*/
|
2012-07-18 11:45:59 +02:00
|
|
|
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if its a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    /* The MSB of the event field is the valid flag set by the producer in
     * trace_record_finish() */
    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    /* NOTE(review): malloc return value is not checked before the
     * dereference below -- a failed allocation would crash. TODO confirm
     * whether an explicit check/skip is wanted here. */
    *recordptr = malloc(record.length); /* dont use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;
    /* clear the trace buffer range for consumed record otherwise any byte
     * with its MSB set may be considered as a valid event id when the writer
     * thread crosses this range of buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}
|
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/**
|
|
|
|
* Kick writeout thread
|
|
|
|
*
|
|
|
|
* @wait Whether to wait for writeout thread to complete
|
|
|
|
*/
|
|
|
|
static void flush_trace_file(bool wait)
{
    g_static_mutex_lock(&trace_lock);
    /* Mark records available and wake the writeout thread */
    trace_available = true;
    g_cond_signal(trace_available_cond);

    if (wait) {
        /* Block until the writeout thread signals the buffer is drained */
        g_cond_wait(trace_empty_cond, g_static_mutex_get_mutex(&trace_lock));
    }

    g_static_mutex_unlock(&trace_lock);
}
|
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/* Block the writeout thread until records are available AND writeout is
 * enabled.  While waiting, signal trace_empty_cond so that a thread in
 * flush_trace_file(true) knows the buffer has been drained.
 */
static void wait_for_trace_records_available(void)
{
    g_static_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(trace_empty_cond);
        g_cond_wait(trace_available_cond,
                    g_static_mutex_get_mutex(&trace_lock));
    }
    /* Consume the availability flag before releasing the lock */
    trace_available = false;
    g_static_mutex_unlock(&trace_lock);
}
|
|
|
|
|
2011-09-05 09:30:17 +02:00
|
|
|
/* Dedicated writeout thread: waits for records, writes them to trace_fp,
 * and loops forever.  Never returns in practice.
 *
 * @opaque  unused
 */
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    uint64_t dropped_count;
    size_t unused __attribute__ ((unused));

    for (;;) {
        wait_for_trace_records_available();

        if (dropped_events) {
            /* Synthesize a special record reporting how many events were
             * dropped since the last writeout.  (These statements were
             * previously joined by stray comma operators; use semicolons.)
             */
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(dropped_events);
            dropped.rec.reserved = 0;
            /* Atomically fetch-and-reset the dropped counter */
            while (1) {
                dropped_count = dropped_events;
                /* NOTE(review): g_atomic_int_* operates on 32-bit values but
                 * dropped_events is a uint64_t -- only the low word is
                 * handled atomically.  TODO confirm/migrate to a 64-bit
                 * atomic primitive. */
                if (g_atomic_int_compare_and_exchange((gint *)&dropped_events,
                                                      dropped_count, 0)) {
                    break;
                }
            }
            memcpy(dropped.rec.arguments, &dropped_count, sizeof(uint64_t));
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        /* Drain all valid records currently in the ring buffer */
        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* dont use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/* Append a 64-bit argument to a record being built; advances the record's
 * write offset. */
void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/* Append a length-prefixed string argument to a record being built */
void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    /* Write string length first */
    rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
    /* Write actual string now */
    rec->rec_off = write_to_buffer(rec->rec_off, (void*)s, slen);
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/* Reserve space in the ring buffer for a new record and write its header.
 *
 * @rec       Filled in with the buffer offsets for subsequent argument writes
 * @event     Event ID to store in the record header
 * @datasize  Size of the event's arguments in bytes
 *
 * Returns 0 on success, -ENOSPC if the buffer is full (event dropped).
 */
int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t timestamp_ns = get_clock();

    /* Lock-free reservation: CAS-advance trace_idx by the record length */
    while (1) {
        old_idx = trace_idx;
        smp_rmb();
        new_idx = old_idx + rec_len;

        /* Offsets grow without wrapping, so producer-consumer distance is a
         * plain subtraction */
        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace Buffer Full, Event dropped ! */
            g_atomic_int_inc((gint *)&dropped_events);
            return -ENOSPC;
        }

        if (g_atomic_int_compare_and_exchange((gint *)&trace_idx,
                                              old_idx, new_idx)) {
            break;
        }
    }

    idx = old_idx % TRACE_BUF_LEN;

    /* Write the header fields; the valid flag is set later by
     * trace_record_finish() */
    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event, sizeof(event));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
|
2010-05-22 20:24:51 +02:00
|
|
|
{
|
2012-07-18 11:45:59 +02:00
|
|
|
uint8_t *data_ptr = dataptr;
|
|
|
|
uint32_t x = 0;
|
|
|
|
while (x < size) {
|
|
|
|
if (idx >= TRACE_BUF_LEN) {
|
|
|
|
idx = idx % TRACE_BUF_LEN;
|
|
|
|
}
|
|
|
|
data_ptr[x++] = trace_buf[idx++];
|
|
|
|
}
|
2010-05-22 20:24:51 +02:00
|
|
|
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
|
2010-05-22 20:24:51 +02:00
|
|
|
{
|
2012-07-18 11:45:59 +02:00
|
|
|
uint8_t *data_ptr = dataptr;
|
|
|
|
uint32_t x = 0;
|
|
|
|
while (x < size) {
|
|
|
|
if (idx >= TRACE_BUF_LEN) {
|
|
|
|
idx = idx % TRACE_BUF_LEN;
|
|
|
|
}
|
|
|
|
trace_buf[idx++] = data_ptr[x++];
|
|
|
|
}
|
|
|
|
return idx; /* most callers wants to know where to write next */
|
2010-05-22 20:24:51 +02:00
|
|
|
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/* Publish a completed record by setting its valid flag, then kick the
 * writeout thread if enough unwritten data has accumulated. */
void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    /* Wake the writeout thread once a quarter of the buffer is pending */
    if ((trace_idx - writeout_idx) > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
|
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/* Enable or disable writing trace records to the trace file.
 *
 * @enable  true to open trace_file_name and start writeout, false to stop
 *          writeout and close the file
 */
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        /* "b" mode: trace records are binary, matters on Windows */
        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}
|
|
|
|
|
2010-05-22 20:24:51 +02:00
|
|
|
/**
|
2011-02-26 19:38:39 +01:00
|
|
|
* Set the name of a trace file
|
|
|
|
*
|
|
|
|
* @file The trace file name or NULL for the default name-<pid> set at
|
|
|
|
* config time
|
2010-05-22 20:24:51 +02:00
|
|
|
*/
|
2011-02-26 19:38:39 +01:00
|
|
|
bool st_set_trace_file(const char *file)
|
2010-05-22 20:24:51 +02:00
|
|
|
{
|
2011-02-26 19:38:39 +01:00
|
|
|
st_set_trace_file_enabled(false);
|
|
|
|
|
2012-08-13 21:51:16 +02:00
|
|
|
g_free(trace_file_name);
|
2011-02-26 19:38:39 +01:00
|
|
|
|
|
|
|
if (!file) {
|
2012-08-13 21:51:16 +02:00
|
|
|
trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, getpid());
|
2011-02-26 19:38:39 +01:00
|
|
|
} else {
|
2012-08-13 21:51:16 +02:00
|
|
|
trace_file_name = g_strdup_printf("%s", file);
|
2011-02-26 19:38:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
st_set_trace_file_enabled(true);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Print the trace file name and whether tracing to it is currently on */
void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}
|
2010-06-24 13:34:53 +02:00
|
|
|
|
2011-08-31 20:31:18 +02:00
|
|
|
/* Synchronously drain all pending records to the trace file; also
 * registered via atexit() in trace_backend_init(). */
void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}
|
|
|
|
|
|
|
|
/* Dump the name, numeric ID and enabled state of every known trace event */
void trace_print_events(FILE *stream, fprintf_function stream_printf)
{
    unsigned int ev;

    for (ev = 0; ev < NR_TRACE_EVENTS; ev++) {
        stream_printf(stream, "%s [Event ID %u] : state %u\n",
                      trace_list[ev].tp_name, ev, trace_list[ev].state);
    }
}
|
|
|
|
|
2011-08-31 20:31:18 +02:00
|
|
|
bool trace_event_set_state(const char *name, bool state)
|
2010-06-24 13:34:53 +02:00
|
|
|
{
|
|
|
|
unsigned int i;
|
2011-10-31 04:29:04 +01:00
|
|
|
unsigned int len;
|
|
|
|
bool wildcard = false;
|
|
|
|
bool matched = false;
|
|
|
|
|
|
|
|
len = strlen(name);
|
|
|
|
if (len > 0 && name[len - 1] == '*') {
|
|
|
|
wildcard = true;
|
|
|
|
len -= 1;
|
|
|
|
}
|
2010-06-24 13:34:53 +02:00
|
|
|
for (i = 0; i < NR_TRACE_EVENTS; i++) {
|
2011-10-31 04:29:04 +01:00
|
|
|
if (wildcard) {
|
|
|
|
if (!strncmp(trace_list[i].tp_name, name, len)) {
|
|
|
|
trace_list[i].state = state;
|
|
|
|
matched = true;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2011-02-26 19:38:39 +01:00
|
|
|
if (!strcmp(trace_list[i].tp_name, name)) {
|
2011-08-31 20:31:18 +02:00
|
|
|
trace_list[i].state = state;
|
2011-02-26 19:38:39 +01:00
|
|
|
return true;
|
2010-06-24 13:34:53 +02:00
|
|
|
}
|
|
|
|
}
|
2011-10-31 04:29:04 +01:00
|
|
|
return matched;
|
2011-02-26 19:38:39 +01:00
|
|
|
}
|
|
|
|
|
2011-09-05 09:30:17 +02:00
|
|
|
/* Helper function to create a thread with signals blocked. Use glib's
|
|
|
|
* portable threads since QEMU abstractions cannot be used due to reentrancy in
|
|
|
|
* the tracer. Also note the signal masking on POSIX hosts so that the thread
|
|
|
|
* does not steal signals when the rest of the program wants them blocked.
|
|
|
|
*/
|
|
|
|
/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    /* Block all signals in this thread so the new thread inherits an empty
     * set; restore the caller's mask afterwards */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif
    thread = g_thread_create(fn, NULL, FALSE, NULL);
#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}
|
|
|
|
|
|
|
|
/* Initialize the simple trace backend: start the writeout thread, apply the
 * initial event states and open the trace file.
 *
 * @events  Initial event state configuration, passed to
 *          trace_backend_init_events()
 * @file    Trace file name, or NULL for the default (see st_set_trace_file)
 *
 * Returns false if the writeout thread could not be created.
 */
bool trace_backend_init(const char *events, const char *file)
{
    GThread *thread;

    if (!g_thread_supported()) {
#if !GLIB_CHECK_VERSION(2, 31, 0)
        g_thread_init(NULL);
#else
        /* glib >= 2.31 initializes threading automatically; reaching here
         * means something is badly wrong */
        fprintf(stderr, "glib threading failed to initialize.\n");
        exit(1);
#endif
    }

    trace_available_cond = g_cond_new();
    trace_empty_cond = g_cond_new();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    /* Drain any buffered records on normal exit */
    atexit(st_flush_trace_buffer);
    trace_backend_init_events(events);
    st_set_trace_file(file);
    return true;
}
|