accel/tcg: Add 'size' param to probe_access_full

Change to match the recent change to probe_access_flags.
All existing callers updated to supply 0, so no change in behaviour.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Author: Richard Henderson
Date:   2023-02-23 14:44:14 -10:00
Commit: d507e6c565
Parent: 1770b2f2d3

7 changed files with 10 additions and 10 deletions
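
A minimal, self-contained C sketch of the same kind of mechanical update described in the commit message: a probe-style helper gains a 'size' parameter and an existing caller passes 0 to keep the old behaviour. The stub function, types, and values below are hypothetical stand-ins, not QEMU code.

```c
#include <stdio.h>

/*
 * Hypothetical stand-in for probe_access_full.  The old signature was
 * probe_stub(addr, mmu_idx); the new one inserts 'size' after 'addr',
 * mirroring the earlier probe_access_flags change mentioned above.
 */
static int probe_stub(unsigned long addr, int size, int mmu_idx)
{
    printf("probe addr=0x%lx size=%d mmu_idx=%d\n", addr, size, mmu_idx);
    return 0;
}

int main(void)
{
    /* Existing caller, updated to pass 0 for size: behaviour unchanged. */
    return probe_stub(0x1000, 0, 1);
}
```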

@@ -1589,12 +1589,12 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     return flags;
 }
-int probe_access_full(CPUArchState *env, target_ulong addr,
+int probe_access_full(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
                       uintptr_t retaddr)
 {
-    int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+    int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                       nonfault, phost, pfull, retaddr);
     /* Handle clean RAM pages. */

@@ -475,7 +475,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
  * and must be consumed or copied immediately, before any further
  * access or changes to TLB @mmu_idx.
  */
-int probe_access_full(CPUArchState *env, target_ulong addr,
+int probe_access_full(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost,
                       CPUTLBEntryFull **pfull, uintptr_t retaddr);
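
The comment above documents the one subtle rule for this interface: the CPUTLBEntryFull data handed back through *pfull must be consumed or copied before the TLB for @mmu_idx is touched again. Below is a self-contained sketch of that calling pattern; every type, value, and the probe body are hypothetical stand-ins, and only the ordering (probe, test the invalid flag, copy immediately) reflects the contract stated in the header.

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_INVALID_MASK (1 << 0)   /* stand-in flag bit, not QEMU's value */

/* Stand-in for CPUTLBEntryFull: only the field this sketch copies out. */
typedef struct {
    uint64_t phys_addr;
} FullStub;

/* Stub probe: pretend the page is resident and hand back its TLB data. */
static int probe_stub(uint64_t addr, int size, bool nonfault,
                      void **phost, FullStub **pfull)
{
    static char page[4096];
    static FullStub slot = { 0x40001000 };

    (void)addr; (void)size; (void)nonfault;
    *phost = page;
    *pfull = &slot;
    return 0;                       /* no flags set: page is present */
}

int main(void)
{
    void *host;
    FullStub *full;
    int flags = probe_stub(0x1000, 0, true, &host, &full);

    if (flags & TLB_INVALID_MASK) {
        return 1;                   /* nonfault probe: page not mapped */
    }
    /* Copy what is needed now; *full may be clobbered by the next probe. */
    uint64_t phys = full->phys_addr;
    (void)host;
    printf("phys = 0x%" PRIx64 "\n", phys);
    return 0;
}
```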

@@ -259,7 +259,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         int flags;
         env->tlb_fi = fi;
-        flags = probe_access_full(env, addr, MMU_DATA_LOAD,
+        flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
                                   arm_to_core_mmu_idx(s2_mmu_idx),
                                   true, &ptw->out_host, &full, 0);
         env->tlb_fi = NULL;

@@ -118,7 +118,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      * valid. Indicate to probe_access_flags no-fault, then assert that
      * we received a valid page.
      */
-    flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
+    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, &full, ra);
     assert(!(flags & TLB_INVALID_MASK));
@@ -154,7 +154,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      */
     in_page = -(ptr | TARGET_PAGE_MASK);
     if (unlikely(ptr_size > in_page)) {
-        flags |= probe_access_full(env, ptr + in_page, ptr_access,
+        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                    ptr_mmu_idx, ra == 0, &host, &full, ra);
         assert(!(flags & TLB_INVALID_MASK));
     }
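
The second hunk above relies on the idiom in_page = -(ptr | TARGET_PAGE_MASK): ORing in the page mask keeps only the in-page offset (as an offset minus the page size, modulo the word width), and negating that yields the number of bytes from ptr to the end of its page, so ptr_size > in_page means the access crosses a page boundary and a second probe is needed. A standalone demonstration of just that arithmetic, assuming a 4 KiB page purely for illustration:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 4 KiB page, standing in for TARGET_PAGE_SIZE/MASK. */
#define PAGE_SIZE ((uintptr_t)4096)
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uintptr_t ptr = 0x12345ff8;             /* 8 bytes before the page end */

    /* ptr | PAGE_MASK equals (offset - PAGE_SIZE) modulo 2^N, so negating
     * it yields PAGE_SIZE - offset: the bytes left in ptr's page. */
    uintptr_t in_page = -(ptr | PAGE_MASK);

    assert(in_page == PAGE_SIZE - (ptr & (PAGE_SIZE - 1)));
    printf("bytes left in page: %lu\n", (unsigned long)in_page);

    unsigned ptr_size = 16;
    if (ptr_size > in_page) {
        /* The access spans two pages: probe ptr + in_page as well. */
        printf("second probe at 0x%lx\n", (unsigned long)(ptr + in_page));
    }
    return 0;
}
```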

@@ -5356,7 +5356,7 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
                                &info->host, retaddr);
 #else
     CPUTLBEntryFull *full;
-    flags = probe_access_full(env, addr, access_type, mmu_idx, nofault,
+    flags = probe_access_full(env, addr, 0, access_type, mmu_idx, nofault,
                               &info->host, &full, retaddr);
 #endif
     info->flags = flags;

@@ -14651,7 +14651,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
      * that the TLB entry must be present and valid, and thus this
      * access will never raise an exception.
      */
-    flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
+    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                               false, &host, &full, 0);
     assert(!(flags & TLB_INVALID_MASK));

@@ -64,7 +64,7 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr)
     int flags;
     inout->gaddr = addr;
-    flags = probe_access_full(inout->env, addr, MMU_DATA_STORE,
+    flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE,
                               inout->ptw_idx, true, &inout->haddr, &full, 0);
     if (unlikely(flags & TLB_INVALID_MASK)) {
@@ -428,7 +428,7 @@ do_check_protect_pse36:
         CPUTLBEntryFull *full;
         int flags, nested_page_size;
-        flags = probe_access_full(env, paddr, access_type,
+        flags = probe_access_full(env, paddr, 0, access_type,
                                   MMU_NESTED_IDX, true,
                                   &pte_trans.haddr, &full, 0);
         if (unlikely(flags & TLB_INVALID_MASK)) {