2010-05-22 20:24:51 +02:00
|
|
|
/*
|
|
|
|
* Simple trace backend
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2010
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2. See
|
|
|
|
* the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <time.h>
|
2011-09-05 09:30:17 +02:00
|
|
|
#ifndef _WIN32
|
2011-02-26 19:38:39 +01:00
|
|
|
#include <signal.h>
|
|
|
|
#include <pthread.h>
|
2011-09-05 09:30:17 +02:00
|
|
|
#endif
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/timer.h"
|
2010-05-22 20:24:51 +02:00
|
|
|
#include "trace.h"
|
2011-08-31 20:31:03 +02:00
|
|
|
#include "trace/control.h"
|
2014-01-14 16:52:55 +01:00
|
|
|
#include "trace/simple.h"
|
2010-05-22 20:24:51 +02:00
|
|
|
|
|
|
|
/** Trace file header event ID */
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 3

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static CompatGMutex trace_lock;
static CompatGCond trace_available_cond;
static CompatGCond trace_empty_cond;

/* Both flags are protected by trace_lock */
static bool trace_available;        /* records are ready for writeout */
static bool trace_writeout_enabled; /* writeout thread may drain the buffer */

enum {
    TRACE_BUF_LEN = 4096 * 64,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

uint8_t trace_buf[TRACE_BUF_LEN];
static volatile gint trace_idx;      /* producer position, advanced atomically */
static unsigned int writeout_idx;    /* consumer position, writeout thread only */
static volatile gint dropped_events; /* events lost because the buffer was full */
static uint32_t trace_pid;           /* cached in st_init(); stamped into records */
static FILE *trace_fp;
static char *trace_file_name;

/** Trace buffer entry */
typedef struct {
    uint64_t event; /* TraceEventID */
    uint64_t timestamp_ns;
    uint32_t length; /* in bytes */
    uint32_t pid;
    uint64_t arguments[];
} TraceRecord;

typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic;    /* HEADER_MAGIC */
    uint64_t header_version;  /* HEADER_VERSION */
} TraceLogHeader;


static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
|
|
|
|
|
|
|
|
static void clear_buffer_range(unsigned int idx, size_t len)
|
|
|
|
{
|
|
|
|
uint32_t num = 0;
|
|
|
|
while (num < len) {
|
|
|
|
if (idx >= TRACE_BUF_LEN) {
|
|
|
|
idx = idx % TRACE_BUF_LEN;
|
|
|
|
}
|
|
|
|
trace_buf[idx++] = 0;
|
|
|
|
num++;
|
|
|
|
}
|
|
|
|
}
|
2010-07-13 10:26:33 +02:00
|
|
|
/**
 * Read a trace record from the trace buffer
 *
 * @idx Trace buffer index
 * @recordptr Filled with a heap-allocated copy of the record; caller frees
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if it's a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    /* NOTE(review): malloc result is not checked before use — verify
     * whether an OOM policy applies here.
     */
    *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;
    /* clear the trace buffer range for consumed record otherwise any byte
     * with its MSB set may be considered as a valid event id when the writer
     * thread crosses this range of buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}
|
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/**
 * Kick writeout thread
 *
 * @wait Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(&trace_available_cond);

    if (wait) {
        /* Block until the writeout thread signals trace_empty_cond */
        g_cond_wait(&trace_empty_cond, &trace_lock);
    }

    g_mutex_unlock(&trace_lock);
}
|
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/* Writeout thread: sleep until records are available AND writeout is enabled.
 * While idle, signal trace_empty_cond so flush_trace_file(true) callers can
 * proceed.
 */
static void wait_for_trace_records_available(void)
{
    g_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(&trace_empty_cond);
        g_cond_wait(&trace_available_cond, &trace_lock);
    }
    trace_available = false;
    g_mutex_unlock(&trace_lock);
}
|
|
|
|
|
2011-09-05 09:30:17 +02:00
|
|
|
/**
 * Dedicated trace writeout thread
 *
 * Loops forever: waits for records, emits a synthetic DROPPED_EVENT_ID
 * record if events were lost, then writes out every valid record starting
 * at the current writeout position and flushes trace_fp.
 */
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    int dropped_count;
    size_t unused __attribute__ ((unused));

    for (;;) {
        wait_for_trace_records_available();

        if (g_atomic_int_get(&dropped_events)) {
            /* Build a synthetic record carrying the number of lost events */
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t);
            dropped.rec.pid = trace_pid;
            /* Atomically fetch-and-reset the dropped counter */
            do {
                dropped_count = g_atomic_int_get(&dropped_events);
            } while (!g_atomic_int_compare_and_exchange(&dropped_events,
                                                        dropped_count, 0));
            dropped.rec.arguments[0] = dropped_count;
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* don't use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/* Append a 64-bit argument at the record's write cursor and advance it */
void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    unsigned int next_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));

    rec->rec_off = next_off;
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/* Append a string argument: a 32-bit length followed by the raw bytes */
void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    unsigned int off = rec->rec_off;

    off = write_to_buffer(off, &slen, sizeof(slen));
    off = write_to_buffer(off, (void *)s, slen);
    rec->rec_off = off;
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/**
 * Reserve space for a trace record in the ring buffer
 *
 * @rec      Filled with the reserved record's buffer positions
 * @event    Event ID stored in the record header
 * @datasize Size of the argument payload in bytes
 *
 * Returns 0 on success, -ENOSPC if the buffer is full (the event is counted
 * in dropped_events).
 */
int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t event_u64 = event;
    uint64_t timestamp_ns = get_clock();

    /* Atomically claim rec_len bytes of buffer space via CAS on trace_idx */
    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace Buffer Full, Event dropped ! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    /* Write the record header; TRACE_RECORD_VALID is set later by
     * trace_record_finish() so the writeout thread skips this partial record.
     */
    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));
    rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
|
2010-05-22 20:24:51 +02:00
|
|
|
{
|
2012-07-18 11:45:59 +02:00
|
|
|
uint8_t *data_ptr = dataptr;
|
|
|
|
uint32_t x = 0;
|
|
|
|
while (x < size) {
|
|
|
|
if (idx >= TRACE_BUF_LEN) {
|
|
|
|
idx = idx % TRACE_BUF_LEN;
|
|
|
|
}
|
|
|
|
data_ptr[x++] = trace_buf[idx++];
|
|
|
|
}
|
2010-05-22 20:24:51 +02:00
|
|
|
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
|
2010-05-22 20:24:51 +02:00
|
|
|
{
|
2012-07-18 11:45:59 +02:00
|
|
|
uint8_t *data_ptr = dataptr;
|
|
|
|
uint32_t x = 0;
|
|
|
|
while (x < size) {
|
|
|
|
if (idx >= TRACE_BUF_LEN) {
|
|
|
|
idx = idx % TRACE_BUF_LEN;
|
|
|
|
}
|
|
|
|
trace_buf[idx++] = data_ptr[x++];
|
|
|
|
}
|
|
|
|
return idx; /* most callers wants to know where to write next */
|
2010-05-22 20:24:51 +02:00
|
|
|
}
|
|
|
|
|
2012-07-18 11:45:59 +02:00
|
|
|
/**
 * Mark a record reserved by trace_record_start() as complete
 *
 * Sets TRACE_RECORD_VALID on the record header so the writeout thread will
 * consume it, and kicks the writeout thread once enough data has accumulated.
 */
void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    if (((unsigned int)g_atomic_int_get(&trace_idx) - writeout_idx)
        > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
|
|
|
|
|
2011-02-26 19:38:39 +01:00
|
|
|
/**
 * Enable or disable trace file writeout
 *
 * Enabling opens trace_file_name, writes the log header, and resumes the
 * writeout thread.  Disabling quiesces the writeout thread first so no
 * record is written after the file is closed.  Silently returns if the
 * file cannot be opened or the header cannot be written.
 */
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}
|
|
|
|
|
2010-05-22 20:24:51 +02:00
|
|
|
/**
|
2011-02-26 19:38:39 +01:00
|
|
|
* Set the name of a trace file
|
|
|
|
*
|
|
|
|
* @file The trace file name or NULL for the default name-<pid> set at
|
|
|
|
* config time
|
2010-05-22 20:24:51 +02:00
|
|
|
*/
|
2011-02-26 19:38:39 +01:00
|
|
|
bool st_set_trace_file(const char *file)
|
2010-05-22 20:24:51 +02:00
|
|
|
{
|
2011-02-26 19:38:39 +01:00
|
|
|
st_set_trace_file_enabled(false);
|
|
|
|
|
2012-08-13 21:51:16 +02:00
|
|
|
g_free(trace_file_name);
|
2011-02-26 19:38:39 +01:00
|
|
|
|
|
|
|
if (!file) {
|
2012-08-13 21:51:16 +02:00
|
|
|
trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, getpid());
|
2011-02-26 19:38:39 +01:00
|
|
|
} else {
|
2012-08-13 21:51:16 +02:00
|
|
|
trace_file_name = g_strdup_printf("%s", file);
|
2011-02-26 19:38:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
st_set_trace_file_enabled(true);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
|
|
|
|
{
|
|
|
|
stream_printf(stream, "Trace file \"%s\" %s.\n",
|
|
|
|
trace_file_name, trace_fp ? "on" : "off");
|
2010-05-22 20:24:51 +02:00
|
|
|
}
|
2010-06-24 13:34:53 +02:00
|
|
|
|
2011-08-31 20:31:18 +02:00
|
|
|
/* Force a synchronous writeout of any buffered trace records; also
 * registered as an atexit() handler in st_init().
 */
void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}
|
|
|
|
|
2011-09-05 09:30:17 +02:00
|
|
|
/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    /* Block every signal in the new thread; the mask is inherited across
     * g_thread_new() and restored for the caller afterwards.
     */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif

    thread = g_thread_new("trace-thread", fn, NULL);

#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}
|
|
|
|
|
2014-05-27 15:02:14 +02:00
|
|
|
/**
 * Initialize the simple trace backend
 *
 * @file Trace file name, or NULL for the configure-time default
 *
 * Spawns the writeout thread, registers an atexit flush, and opens the
 * trace file.  Returns false if the writeout thread cannot be created.
 */
bool st_init(const char *file)
{
    GThread *thread;

    trace_pid = getpid();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    atexit(st_flush_trace_buffer);
    st_set_trace_file(file);
    return true;
}
|