Merge remote-tracking branch 'remotes/stefanha/tags/tracing-pull-request' into staging

Pull request

 * Trace TCG atomic memory accesses
 * Document that trace event arguments cannot be floating point

# gpg: Signature made Wed 27 Jun 2018 13:57:40 BST
# gpg:                using RSA key 9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/tracing-pull-request:
  trace: forbid floating point types
  trace: enable tracing of TCG atomics
  trace: add trace_mem_build_info_no_se_be/le
  trace: expand mem_info:size_shift to 3 bits
  trace: simplify trace_mem functions
  trace: fix misreporting of TCG access sizes for user-space

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 1571a23c8f
Peter Maydell <peter.maydell@linaro.org>, 2018-06-28 09:54:03 +01:00
8 changed files with 127 additions and 36 deletions

accel/tcg/atomic_template.h

@@ -18,30 +18,37 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "trace/mem.h"
#if DATA_SIZE == 16
# define SUFFIX o
# define DATA_TYPE Int128
# define BSWAP bswap128
# define SHIFT 4
#elif DATA_SIZE == 8
# define SUFFIX q
# define DATA_TYPE uint64_t
# define SDATA_TYPE int64_t
# define BSWAP bswap64
# define SHIFT 3
#elif DATA_SIZE == 4
# define SUFFIX l
# define DATA_TYPE uint32_t
# define SDATA_TYPE int32_t
# define BSWAP bswap32
# define SHIFT 2
#elif DATA_SIZE == 2
# define SUFFIX w
# define DATA_TYPE uint16_t
# define SDATA_TYPE int16_t
# define BSWAP bswap16
# define SHIFT 1
#elif DATA_SIZE == 1
# define SUFFIX b
# define DATA_TYPE uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT 0
#else
# error unsupported data size
#endif
@@ -52,14 +59,37 @@
# define ABI_TYPE uint32_t
#endif
#define ATOMIC_TRACE_RMW do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, \
info | TRACE_MEM_ST); \
} while (0)
#define ATOMIC_TRACE_LD do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
} while (0)
# define ATOMIC_TRACE_ST do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
} while (0)
/* Define host-endian atomic operations. Note that END is used within
the ATOMIC_NAME macro, and redefined below. */
#if DATA_SIZE == 1
# define END
# define MEND _be /* either le or be would be fine */
#elif defined(HOST_WORDS_BIGENDIAN)
# define END _be
# define MEND _be
#else
# define END _le
# define MEND _le
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
@@ -67,7 +97,10 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
DATA_TYPE ret;
ATOMIC_TRACE_RMW;
ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
ATOMIC_MMU_CLEANUP;
return ret;
}
@@ -77,6 +110,8 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_LD;
__atomic_load(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
return val;
@@ -87,6 +122,8 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_ST;
__atomic_store(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
}
@@ -96,7 +133,10 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret = atomic_xchg__nocheck(haddr, val);
DATA_TYPE ret;
ATOMIC_TRACE_RMW;
ret = atomic_xchg__nocheck(haddr, val);
ATOMIC_MMU_CLEANUP;
return ret;
}
@@ -107,7 +147,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
{ \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret = atomic_##X(haddr, val); \
DATA_TYPE ret; \
\
ATOMIC_TRACE_RMW; \
ret = atomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
return ret; \
}
@@ -126,6 +169,9 @@ GEN_ATOMIC_HELPER(xor_fetch)
/* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
* Trace this load + RMW loop as a single RMW op. This way, regardless
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
@@ -134,6 +180,8 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE cmp, old, new, val = xval; \
\
ATOMIC_TRACE_RMW; \
smp_mb(); \
cmp = atomic_read__nocheck(haddr); \
do { \
@@ -158,6 +206,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#endif /* DATA SIZE >= 16 */
#undef END
#undef MEND
#if DATA_SIZE > 1
@@ -165,8 +214,10 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
within the ATOMIC_NAME macro. */
#ifdef HOST_WORDS_BIGENDIAN
# define END _le
# define MEND _le
#else
# define END _be
# define MEND _be
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
@@ -174,7 +225,10 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
DATA_TYPE ret;
ATOMIC_TRACE_RMW;
ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
ATOMIC_MMU_CLEANUP;
return BSWAP(ret);
}
@@ -184,6 +238,8 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_LD;
__atomic_load(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
return BSWAP(val);
@@ -194,6 +250,8 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ATOMIC_TRACE_ST;
val = BSWAP(val);
__atomic_store(haddr, &val, __ATOMIC_RELAXED);
ATOMIC_MMU_CLEANUP;
@@ -204,7 +262,10 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ABI_TYPE ret = atomic_xchg__nocheck(haddr, BSWAP(val));
ABI_TYPE ret;
ATOMIC_TRACE_RMW;
ret = atomic_xchg__nocheck(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
return BSWAP(ret);
}
@@ -215,7 +276,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
{ \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret = atomic_##X(haddr, BSWAP(val)); \
DATA_TYPE ret; \
\
ATOMIC_TRACE_RMW; \
ret = atomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
return BSWAP(ret); \
}
@@ -232,6 +296,9 @@ GEN_ATOMIC_HELPER(xor_fetch)
/* These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
* Trace this load + RMW loop as a single RMW op. This way, regardless
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
@@ -240,6 +307,8 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
\
ATOMIC_TRACE_RMW; \
smp_mb(); \
ldn = atomic_read__nocheck(haddr); \
do { \
@@ -271,11 +340,17 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#endif /* DATA_SIZE >= 16 */
#undef END
#undef MEND
#endif /* DATA_SIZE > 1 */
#undef ATOMIC_TRACE_ST
#undef ATOMIC_TRACE_LD
#undef ATOMIC_TRACE_RMW
#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
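
For readers not used to the glue() plumbing, the following sketch (not part of
the patch) shows roughly what ATOMIC_TRACE_RMW expands to in a 4-byte helper
built on a little-endian host, i.e. with SHIFT == 2 and MEND == _le; env and
addr are the parameters of the enclosing atomic helper:

    do {
        /* info describes the load half: size shift 2, no sign extension, LE */
        uint8_t info = trace_mem_build_info_no_se_le(2, false);

        /* an atomic RMW is traced as one load ... */
        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);
        /* ... plus one store at the same address */
        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr,
                                    info | TRACE_MEM_ST);
    } while (0);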

docs/devel/tracing.txt

@@ -104,6 +104,11 @@ Trace events should use types as follows:
* For everything else, use primitive scalar types (char, int, long) with the
appropriate signedness.
* Avoid floating point types (float and double) because SystemTap does not
support them. In most cases it is possible to round to an integer type
instead. This may require scaling the value first by multiplying it by 1000
or the like when digits after the decimal point need to be preserved.
Format strings should reflect the types defined in the trace event. Take
special care to use PRId64 and PRIu64 for int64_t and uint64_t types,
respectively. This ensures portability between 32- and 64-bit platforms.
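
With floating point arguments ruled out, a call site that used to hand a double
straight to a trace event can scale and truncate instead. A hypothetical
example (trace_foo_elapsed_ms and the surrounding variables are made up for
illustration, they are not from this commit):

    /* Keep three digits after the decimal point by tracing milliseconds
     * as an integer rather than seconds as a double. */
    double elapsed_sec = end_time - start_time;
    trace_foo_elapsed_ms((uint64_t)(elapsed_sec * 1000.0));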

include/exec/cpu_ldst_useronly_template.h

@@ -33,20 +33,24 @@
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define SHIFT 3
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#define SHIFT 2
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#define SHIFT 1
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#define SHIFT 0
#else
#error unsupported data size
#endif
@@ -63,7 +67,7 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
trace_mem_build_info(DATA_SIZE, false, MO_TE, false));
trace_mem_build_info(SHIFT, false, MO_TE, false));
#endif
return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
}
@@ -87,7 +91,7 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
trace_mem_build_info(DATA_SIZE, true, MO_TE, false));
trace_mem_build_info(SHIFT, true, MO_TE, false));
#endif
return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
}
@@ -113,7 +117,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
trace_mem_build_info(DATA_SIZE, false, MO_TE, true));
trace_mem_build_info(SHIFT, false, MO_TE, true));
#endif
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
}
@@ -136,3 +140,4 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef SHIFT
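
The new SHIFT define is the actual fix here: trace_mem_build_info() expects the
log2 of the access size (0 for 1 byte, 1 for 2, 2 for 4, 3 for 8), while
DATA_SIZE holds the byte count, so the old calls packed the wrong value into
the info byte. A sketch of the relationship, using the TRACE_MEM_SZ_SHIFT_MASK
constant from trace/mem-internal.h further below (this helper is illustrative
only, it is not part of the patch):

    /* Recover the access size in bytes from a packed info value. */
    static inline unsigned trace_info_to_size(uint8_t info)
    {
        return 1u << (info & TRACE_MEM_SZ_SHIFT_MASK);   /* low 3 bits */
    }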

migration/trace-events

@@ -133,7 +133,7 @@ migrate_global_state_post_load(const char *state) "loaded state: %s"
migrate_global_state_pre_save(const char *state) "saved state: %s"
migration_thread_low_pending(uint64_t pending) "%" PRIu64
migrate_state_too_big(void) ""
migrate_transferred(uint64_t tranferred, uint64_t time_spent, double bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %g max_size %" PRId64
migrate_transferred(uint64_t tranferred, uint64_t time_spent, uint64_t bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %" PRIu64 " max_size %" PRId64
process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d"
process_incoming_migration_co_postcopy_end_main(void) ""
migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s"
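
Callers now have to pass bandwidth as an integer. A hedged sketch of what a
call site might look like after this change (the variable names and the
bandwidth computation are assumptions, not taken from this commit):

    double bandwidth = (double)transferred / time_spent;   /* assumed units */
    trace_migrate_transferred(transferred, time_spent,
                              (uint64_t)bandwidth, max_size);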

qapi/trace-events

@@ -29,6 +29,6 @@ visit_type_int64(void *v, const char *name, int64_t *obj) "v=%p name=%s obj=%p"
visit_type_size(void *v, const char *name, uint64_t *obj) "v=%p name=%s obj=%p"
visit_type_bool(void *v, const char *name, bool *obj) "v=%p name=%s obj=%p"
visit_type_str(void *v, const char *name, char **obj) "v=%p name=%s obj=%p"
visit_type_number(void *v, const char *name, double *obj) "v=%p name=%s obj=%p"
visit_type_number(void *v, const char *name, void *obj) "v=%p name=%s obj=%p"
visit_type_any(void *v, const char *name, void *obj) "v=%p name=%s obj=%p"
visit_type_null(void *v, const char *name, void *obj) "v=%p name=%s obj=%p"

scripts/tracetool/__init__.py

@@ -53,8 +53,6 @@ ALLOWED_TYPES = [
"bool",
"unsigned",
"signed",
"float",
"double",
"int8_t",
"uint8_t",
"int16_t",

trace/mem-internal.h

@@ -10,37 +10,45 @@
#ifndef TRACE__MEM_INTERNAL_H
#define TRACE__MEM_INTERNAL_H
static inline uint8_t trace_mem_get_info(TCGMemOp op, bool store)
#define TRACE_MEM_SZ_SHIFT_MASK 0x7 /* size shift mask */
#define TRACE_MEM_SE (1ULL << 3) /* sign extended (y/n) */
#define TRACE_MEM_BE (1ULL << 4) /* big endian (y/n) */
#define TRACE_MEM_ST (1ULL << 5) /* store (y/n) */
static inline uint8_t trace_mem_build_info(
int size_shift, bool sign_extend, TCGMemOp endianness, bool store)
{
uint8_t res = op;
bool be = (op & MO_BSWAP) == MO_BE;
uint8_t res;
/* remove untraced fields */
res &= (1ULL << 4) - 1;
/* make endianness absolute */
res &= ~MO_BSWAP;
if (be) {
res |= 1ULL << 3;
res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
if (sign_extend) {
res |= TRACE_MEM_SE;
}
if (endianness == MO_BE) {
res |= TRACE_MEM_BE;
}
/* add fields */
if (store) {
res |= 1ULL << 4;
res |= TRACE_MEM_ST;
}
return res;
}
static inline uint8_t trace_mem_build_info(
TCGMemOp size, bool sign_extend, TCGMemOp endianness, bool store)
static inline uint8_t trace_mem_get_info(TCGMemOp op, bool store)
{
uint8_t res = 0;
res |= size;
res |= (sign_extend << 2);
if (endianness == MO_BE) {
res |= (1ULL << 3);
}
res |= (store << 4);
return res;
return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
op & MO_BSWAP, store);
}
static inline
uint8_t trace_mem_build_info_no_se_be(int size_shift, bool store)
{
return trace_mem_build_info(size_shift, false, MO_BE, store);
}
static inline
uint8_t trace_mem_build_info_no_se_le(int size_shift, bool store)
{
return trace_mem_build_info(size_shift, false, MO_LE, store);
}
#endif /* TRACE__MEM_INTERNAL_H */
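
After this change the packed info byte has a fixed layout: bits 0-2 carry the
size shift, bit 3 the sign-extension flag, bit 4 the big-endian flag and bit 5
the store flag. A short usage sketch (assumed to sit inside a test or helper
function; it is not part of the patch):

    /* A sign-extended big-endian 16-bit load: size_shift == 1, store == false */
    uint8_t info = trace_mem_build_info(1, true, MO_BE, false);

    assert((info & TRACE_MEM_SZ_SHIFT_MASK) == 1);  /* 1 << 1 == 2-byte access */
    assert(info & TRACE_MEM_SE);                    /* sign-extended */
    assert(info & TRACE_MEM_BE);                    /* big-endian */
    assert(!(info & TRACE_MEM_ST));                 /* a load, not a store */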

trace/mem.h

@@ -25,7 +25,7 @@ static uint8_t trace_mem_get_info(TCGMemOp op, bool store);
*
* Return a value for the 'info' argument in guest memory access traces.
*/
static uint8_t trace_mem_build_info(TCGMemOp size, bool sign_extend,
static uint8_t trace_mem_build_info(int size_shift, bool sign_extend,
TCGMemOp endianness, bool store);