accel/tcg: Probe the proper permissions for atomic ops

We had a single ATOMIC_MMU_LOOKUP macro that probed for
read+write on all atomic ops.  This is incorrect for
plain atomic load and atomic store.

For user-only, we rely on the host page permissions.
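
As a sketch of the new contract (using only names that appear in the patch
below), each class of helper now probes just for the access it performs:

    /* Illustration only: how the probe now matches the operation. */
    void *h;
    h = atomic_mmu_lookup(env, addr, oi, size, PAGE_READ, ra);   /* atomic load  */
    h = atomic_mmu_lookup(env, addr, oi, size, PAGE_WRITE, ra);  /* atomic store */
    h = atomic_mmu_lookup(env, addr, oi, size,
                          PAGE_READ | PAGE_WRITE, ra);           /* cmpxchg, xchg, fetch-ops */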

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/390
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2021-06-12 17:21:06 -07:00
commit 08dff435e2
parent e5b4654907
3 changed files with 83 additions and 44 deletions

diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h

@@ -74,7 +74,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     DATA_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                          ATOMIC_MMU_IDX);
@@ -95,7 +95,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                          ATOMIC_MMU_IDX);
@@ -110,7 +110,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                      ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
                                          ATOMIC_MMU_IDX);
@@ -125,7 +125,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                            ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     DATA_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                          ATOMIC_MMU_IDX);
@@ -142,7 +142,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                         ABI_TYPE val EXTRA_ARGS)                   \
 {                                                                  \
     ATOMIC_MMU_DECLS;                                              \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
     DATA_TYPE ret;                                                 \
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,   \
                                          ATOMIC_MMU_IDX);          \
@@ -176,7 +176,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                         ABI_TYPE xval EXTRA_ARGS)                  \
 {                                                                  \
     ATOMIC_MMU_DECLS;                                              \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                         \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                      \
     XDATA_TYPE cmp, old, new, val = xval;                          \
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,   \
                                          ATOMIC_MMU_IDX);          \
@@ -221,7 +221,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     DATA_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                          ATOMIC_MMU_IDX);
@@ -242,7 +242,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                          ATOMIC_MMU_IDX);
@@ -257,7 +257,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                      ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
                                          ATOMIC_MMU_IDX);
@@ -274,7 +274,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                            ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     ABI_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                          ATOMIC_MMU_IDX);
@@ -291,7 +291,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                         ABI_TYPE val EXTRA_ARGS)                   \
 {                                                                  \
     ATOMIC_MMU_DECLS;                                              \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
     DATA_TYPE ret;                                                 \
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,   \
                                          false, ATOMIC_MMU_IDX);   \
@@ -323,7 +323,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
                         ABI_TYPE xval EXTRA_ARGS)                  \
 {                                                                  \
     ATOMIC_MMU_DECLS;                                              \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                         \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                      \
     XDATA_TYPE ldo, ldn, old, new, val = xval;                     \
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,   \
                                          false, ATOMIC_MMU_IDX);   \

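To make the expansion concrete, here is a hypothetical, simplified
instantiation of the ld helper for DATA_SIZE == 4 (tracing elided; the
helper name and body are illustrative, the macros above produce the real
one):

    uint32_t helper_atomic_ldl_mmu(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, uintptr_t retaddr)
    {
        /* ATOMIC_MMU_LOOKUP_R expands to a PAGE_READ-only probe. */
        uint32_t val, *haddr = atomic_mmu_lookup(env, addr, oi, 4,
                                                 PAGE_READ, retaddr);
        val = qatomic_read__nocheck(haddr);
        ATOMIC_MMU_CLEANUP;
        return val;
    }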
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c

@@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
 
 #endif
 
-/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
- * operations, or io operations to proceed.  Return the host address.  */
+/*
+ * Probe for an atomic operation.  Do not allow unaligned operations,
+ * or io operations to proceed.  Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr)
+                               TCGMemOpIdx oi, int size, int prot,
+                               uintptr_t retaddr)
 {
     size_t mmu_idx = get_mmuidx(oi);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = tlb_addr_write(tlbe);
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
-    int s_bits = mop & MO_SIZE;
+    uintptr_t index;
+    CPUTLBEntry *tlbe;
+    target_ulong tlb_addr;
     void *hostaddr;
 
     /* Adjust the given return address.  */
@@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Enforce qemu required alignment.  */
-    if (unlikely(addr & ((1 << s_bits) - 1))) {
+    if (unlikely(addr & (size - 1))) {
         /* We get here if guest alignment was not requested,
            or was not enforced by cpu_unaligned_access above.
            We might widen the access and emulate, but for now
@@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         goto stop_the_world;
     }
 
+    index = tlb_index(env, mmu_idx, addr);
+    tlbe = tlb_entry(env, mmu_idx, addr);
+
     /* Check TLB entry and enforce page permissions.  */
-    if (!tlb_hit(tlb_addr, addr)) {
-        if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
-                     mmu_idx, retaddr);
-            index = tlb_index(env, mmu_idx, addr);
-            tlbe = tlb_entry(env, mmu_idx, addr);
+    if (prot & PAGE_WRITE) {
+        tlb_addr = tlb_addr_write(tlbe);
+        if (!tlb_hit(tlb_addr, addr)) {
+            if (!VICTIM_TLB_HIT(addr_write, addr)) {
+                tlb_fill(env_cpu(env), addr, size,
+                         MMU_DATA_STORE, mmu_idx, retaddr);
+                index = tlb_index(env, mmu_idx, addr);
+                tlbe = tlb_entry(env, mmu_idx, addr);
+            }
+            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+        }
+
+        /* Let the guest notice RMW on a write-only page.  */
+        if ((prot & PAGE_READ) &&
+            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
+            tlb_fill(env_cpu(env), addr, size,
+                     MMU_DATA_LOAD, mmu_idx, retaddr);
+            /*
+             * Since we don't support reads and writes to different addresses,
+             * and we do have the proper page loaded for write, this shouldn't
+             * ever return.  But just in case, handle via stop-the-world.
+             */
+            goto stop_the_world;
+        }
+    } else /* if (prot & PAGE_READ) */ {
+        tlb_addr = tlbe->addr_read;
+        if (!tlb_hit(tlb_addr, addr)) {
+            if (!VICTIM_TLB_HIT(addr_write, addr)) {
+                tlb_fill(env_cpu(env), addr, size,
+                         MMU_DATA_LOAD, mmu_idx, retaddr);
+                index = tlb_index(env, mmu_idx, addr);
+                tlbe = tlb_entry(env, mmu_idx, addr);
+            }
+            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
         }
-        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
     }
 
     /* Notice an IO access or a needs-MMU-lookup access */
@@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         goto stop_the_world;
     }
 
-    /* Let the guest notice RMW on a write-only page.  */
-    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
-        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
-                 mmu_idx, retaddr);
-        /* Since we don't support reads and writes to different addresses,
-           and we do have the proper page loaded for write, this shouldn't
-           ever return.  But just in case, handle via stop-the-world.  */
-        goto stop_the_world;
-    }
-
     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        notdirty_write(env_cpu(env), addr, 1 << s_bits,
+        notdirty_write(env_cpu(env), addr, size,
                        &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
     }
@@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
 #define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_LOOKUP_RW \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
+#define ATOMIC_MMU_LOOKUP_R \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
+#define ATOMIC_MMU_LOOKUP_W \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
 #define ATOMIC_MMU_CLEANUP
 #define ATOMIC_MMU_IDX   get_mmuidx(oi)
@@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 
 #undef EXTRA_ARGS
 #undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
+#undef ATOMIC_MMU_LOOKUP_RW
+#undef ATOMIC_MMU_LOOKUP_R
+#undef ATOMIC_MMU_LOOKUP_W
 
 #define EXTRA_ARGS         , TCGMemOpIdx oi
 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
+#define ATOMIC_MMU_LOOKUP_RW \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
+#define ATOMIC_MMU_LOOKUP_R \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
+#define ATOMIC_MMU_LOOKUP_W \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
 
 #define DATA_SIZE 1
 #include "atomic_template.h"

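For the guest-visible effect, a hypothetical reproducer in the spirit of
issue 390 (not part of this patch): a plain atomic load from a page the
guest has made read-only must succeed, but the old read+write probe
filled the TLB for MMU_DATA_STORE and faulted it like a store.

    #include <stdatomic.h>
    #include <sys/mman.h>

    int main(void)
    {
        _Atomic int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        atomic_store(p, 42);
        mprotect(p, 4096, PROT_READ);  /* page is now read-only */
        return atomic_load(p);         /* needs only read permission */
    }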
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c

@@ -1220,7 +1220,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
 /* Macro to call the above, with local variables from the use context.  */
 #define ATOMIC_MMU_DECLS     do {} while (0)
-#define ATOMIC_MMU_LOOKUP    atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
+#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
+#define ATOMIC_MMU_LOOKUP_R  ATOMIC_MMU_LOOKUP_RW
+#define ATOMIC_MMU_LOOKUP_W  ATOMIC_MMU_LOOKUP_RW
 #define ATOMIC_MMU_CLEANUP   do { clear_helper_retaddr(); } while (0)
 #define ATOMIC_MMU_IDX       MMU_USER_IDX
@@ -1250,12 +1252,12 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
 #undef EXTRA_ARGS
 #undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
+#undef ATOMIC_MMU_LOOKUP_RW
 
 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
+#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
 
 #define DATA_SIZE 16
 #include "atomic_template.h"