tcg: let plugins instrument virtual memory accesses

To capture all memory accesses we need to hook into all the various
helper functions that are involved in memory operations, as well as
the injected inline helper calls. A later commit will allow us to
resolve the actual guest HW addresses by replaying the lookup.

Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: drop haddr handling, just deal in vaddr]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Emilio G. Cota, 2018-10-21 13:24:26 -04:00 (committed by Alex Bennée)
parent cfec388518
commit e6d86bed50
8 changed files with 74 additions and 36 deletions
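
The plugin-facing half of this hook is the qemu_plugin_vcpu_mem_cb()
dispatch that appears throughout the diff below. A minimal sketch of the
kind of plugin this enables, modelled on the in-tree tests/plugin/mem.c
from the same series; the qemu-plugin.h calls are the public API, while
the flat counter is illustrative only:

    #include <inttypes.h>
    #include <stdio.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static uint64_t mem_count;

    /* Called once per instrumented guest memory access, with the vaddr. */
    static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                         uint64_t vaddr, void *udata)
    {
        mem_count++; /* not atomic; a real plugin would keep per-vCPU counts */
    }

    /* Hook every instruction of every translated block. */
    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n = qemu_plugin_tb_n_insns(tb);
        for (size_t i = 0; i < n; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                             QEMU_PLUGIN_CB_NO_REGS,
                                             QEMU_PLUGIN_MEM_RW, NULL);
        }
    }

    static void plugin_exit(qemu_plugin_id_t id, void *p)
    {
        printf("memory accesses: %" PRIu64 "\n", mem_count);
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }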

@@ -25,6 +25,8 @@ void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static inline void
atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
}
static inline
@@ -36,6 +38,7 @@ void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static inline
void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
static inline
@@ -47,4 +50,5 @@ void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static inline
void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
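
Note the rmw_post case above: an atomic read-modify-write dispatches the
callback twice for the same vaddr, once as a load and once with
TRACE_MEM_ST set, so plugins observe both halves of the operation. On
the plugin side the direction is queried with the public helpers; a
fragment of such a callback (a drop-in body for vcpu_mem in the sketch
above, with illustrative counters):

    static uint64_t ld_count, st_count;

    static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                         uint64_t vaddr, void *udata)
    {
        /* An atomic RMW arrives here twice: once per direction. */
        if (qemu_plugin_mem_is_store(info)) {
            st_count++;
        } else {
            ld_count++;
        }
    }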

@@ -18,6 +18,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 16

@@ -268,6 +268,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
qemu_plugin_disable_mem_helpers(cpu);
}
if (cpu_in_exclusive_context(cpu)) {
@@ -701,6 +702,8 @@ int cpu_exec(CPUState *cpu)
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
qemu_plugin_disable_mem_helpers(cpu);
assert_no_pages_locked();
}
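
Both unwind paths above clear the per-vCPU memory-helper state: the
helper-based callbacks consult a pointer into the currently-executing
TB's instrumentation data, which must not survive an abnormal exit from
the block. A sketch of the idea, assuming the per-vCPU plugin_mem_cbs
pointer used by the plugin core (the real function lives in
plugins/core.c):

    void qemu_plugin_disable_mem_helpers(CPUState *cpu)
    {
        /*
         * The memory helpers read cpu->plugin_mem_cbs, which points into
         * the current TB's instrumentation; after a longjmp back into the
         * exec loop or an exclusive step that data may be gone, so drop
         * the reference before anything else can fire.
         */
        cpu->plugin_mem_cbs = NULL;
    }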

@@ -214,6 +214,7 @@ typedef struct CPUTLBCommon {
* Since this is placed within CPUNegativeOffsetState, the smallest
* negative offsets are at the end of the struct.
*/
typedef struct CPUTLB {
CPUTLBCommon c;
CPUTLBDesc d[NB_MMU_MODES];

@@ -28,6 +28,7 @@
#include "trace-root.h"
#endif
#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 8
@@ -86,11 +87,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
#if !defined(SOFTMMU_CODE_ACCESS)
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx));
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
@@ -104,6 +103,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
}
#ifndef SOFTMMU_CODE_ACCESS
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
return res;
}
@@ -124,11 +126,9 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
#if !defined(SOFTMMU_CODE_ACCESS)
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx));
uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
@@ -142,6 +142,9 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
}
#ifndef SOFTMMU_CODE_ACCESS
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
return res;
}
@@ -165,11 +168,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
#if !defined(SOFTMMU_CODE_ACCESS)
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx));
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
@@ -183,6 +184,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
uintptr_t hostaddr = addr + entry->addend;
glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
}
#ifndef SOFTMMU_CODE_ACCESS
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
}
static inline void
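
The pattern repeated through this template is: build the meminfo word
once, trace before the access, perform the access, then fire the plugin
callback after it completes, with all of it compiled out for
SOFTMMU_CODE_ACCESS so instruction fetches are never reported. A
standalone sketch of that shape with the QEMU internals stubbed out (the
stubs and the byte-load body are illustrative; only the ordering mirrors
the diff):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stubs standing in for trace/mem.h and the plugin core. */
    static uint16_t build_meminfo_stub(unsigned size_shift, int is_store)
    {
        return (uint16_t)((is_store ? 0x40u : 0u) | size_shift); /* made-up layout */
    }
    static void trace_before_stub(uint64_t vaddr, uint16_t info) { }
    static void plugin_cb_stub(uint64_t vaddr, uint16_t info)
    {
        printf("access at %#" PRIx64 ", info %#x\n", vaddr, info);
    }

    static uint8_t guest_ram[4096];

    /* Shape of each helper after this patch: info once, trace before, cb after. */
    static uint8_t cpu_ldub_sketch(uint64_t vaddr)
    {
        uint16_t meminfo = build_meminfo_stub(0, 0);        /* computed exactly once */
        trace_before_stub(vaddr, meminfo);                  /* tracepoint fires first */
        uint8_t res = guest_ram[vaddr % sizeof(guest_ram)]; /* the actual access */
        plugin_cb_stub(vaddr, meminfo);                     /* plugin sees it after */
        return res;
    }

    int main(void)
    {
        return cpu_ldub_sketch(0x1000) == 0 ? 0 : 1;
    }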

@@ -64,18 +64,18 @@
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#ifdef CODE_ACCESS
RES_TYPE ret;
#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
return ret;
#else
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, false, MMU_USER_IDX));
return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false,
MMU_USER_IDX);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
#endif
return ret;
}
#ifndef CODE_ACCESS
@@ -96,18 +96,19 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#ifdef CODE_ACCESS
int ret;
#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
return ret;
#else
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, true, MO_TE, false, MMU_USER_IDX));
return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false,
MMU_USER_IDX);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
return ret;
}
#ifndef CODE_ACCESS
@@ -130,10 +131,11 @@ static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
RES_TYPE v)
{
trace_guest_mem_before_exec(
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, true, MMU_USER_IDX));
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true,
MMU_USER_IDX);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
static inline void
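
In the user-mode template the access goes straight to host memory via
g2h(), but the callback still reports the guest virtual address. The
"later commit" from the commit message builds on exactly this: in
full-system emulation a plugin can replay the just-completed lookup from
inside the callback. A hedged sketch of such a callback variant, using
the public API from the same series:

    static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                         uint64_t vaddr, void *udata)
    {
        /* NULL in user-mode; in system mode this replays the TLB lookup. */
        struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);
        if (hw && !qemu_plugin_hwaddr_is_io(hw)) {
            /* the access was backed by RAM rather than a device */
        }
    }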

@@ -30,6 +30,7 @@
#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
#include "exec/plugin-gen.h"
/* Reduce the number of ifdefs below. This assumes that all uses of
TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
@@ -2684,6 +2685,7 @@ void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
}
plugin_gen_disable_mem_helpers();
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
@@ -2696,6 +2698,7 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
plugin_gen_disable_mem_helpers();
/* When not chaining, we simply fall through to the "fallback" exit. */
if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
tcg_gen_op1i(INDEX_op_goto_tb, idx);
@@ -2705,7 +2708,10 @@ void tcg_gen_goto_tb(unsigned idx)
void tcg_gen_lookup_and_goto_ptr(void)
{
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
TCGv_ptr ptr = tcg_temp_new_ptr();
TCGv_ptr ptr;
plugin_gen_disable_mem_helpers();
ptr = tcg_temp_new_ptr();
gen_helper_lookup_tb_ptr(ptr, cpu_env);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
@@ -2788,14 +2794,24 @@ static void tcg_gen_req_mo(TCGBar type)
}
}
static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
{
#ifdef CONFIG_PLUGIN
if (tcg_ctx->plugin_insn == NULL) {
return;
}
plugin_gen_empty_mem_callback(vaddr, info);
#endif
}
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
MemOp orig_memop;
uint16_t info = trace_mem_get_info(memop, idx, 0);
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, idx, 0));
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2807,6 +2823,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
}
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
plugin_gen_mem_callbacks(addr, info);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@@ -2828,11 +2845,11 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGv_i32 swap = NULL;
uint16_t info = trace_mem_get_info(memop, idx, 1);
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, idx, 1));
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i32();
@@ -2852,6 +2869,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
}
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
plugin_gen_mem_callbacks(addr, info);
if (swap) {
tcg_temp_free_i32(swap);
@@ -2861,6 +2879,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
MemOp orig_memop;
uint16_t info;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2874,8 +2893,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 1, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, idx, 0));
info = trace_mem_get_info(memop, idx, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2887,6 +2906,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
}
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
plugin_gen_mem_callbacks(addr, info);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@@ -2914,6 +2934,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGv_i64 swap = NULL;
uint16_t info;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2922,8 +2943,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, idx, 1));
info = trace_mem_get_info(memop, idx, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i64();
@@ -2947,6 +2968,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
}
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
plugin_gen_mem_callbacks(addr, info);
if (swap) {
tcg_temp_free_i64(swap);
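
On the translation side, plugin_gen_mem_callbacks() plants an empty
inline callback after every qemu_ld/st op, to be filled in (or elided)
once the plugin's subscriptions are known, while the three TB-exit paths
above disable the helper-based form. A plugin that only wants counting
can ask for the inline form directly; a sketch using the mem-inline
registration from the same API, with an illustrative counter (pair it
with the same qemu_plugin_install boilerplate as the first sketch):

    static uint64_t inline_mem_count;

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n = qemu_plugin_tb_n_insns(tb);
        for (size_t i = 0; i < n; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            /* TCG emits the add at translation time; no helper call at run time. */
            qemu_plugin_register_vcpu_mem_inline(insn, QEMU_PLUGIN_MEM_RW,
                                                 QEMU_PLUGIN_INLINE_ADD_U64,
                                                 &inline_mem_count, 1);
        }
    }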

@@ -29,6 +29,7 @@
#include "exec/memop.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"