accel/tcg: Fix unaligned stores to s390x low-address-protected lowcore
If low-address protection is active, unaligned stores to non-protected
parts of lowcore lead to protection exceptions. The reason is that in
such cases the tlb_fill() call in store_helper_unaligned() covers the
[0, addr + size) range, which contains the protected portion of
lowcore. This range is too large.

The most straightforward fix would be to make sure we stay within the
original [addr, addr + size) range. However, if an unaligned access
affects only a single page, we don't need to call tlb_fill() in
store_helper_unaligned() at all, since it would be identical to the
previous tlb_fill() call in store_helper(), and therefore a no-op.
If an unaligned access covers multiple pages, this situation does not
occur.

Therefore, simply skip TLB handling in store_helper_unaligned() if we
are dealing with a single page.
Fixes: 2bcf018340 ("s390x/tcg: low-address protection support")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20220711185640.3558813-2-iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent ba8924113c
commit b0f650f047
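To make the range arithmetic in the commit message concrete, here is a minimal standalone sketch of the page1/page2/size2 computation that the patch below relies on. It is not QEMU code; the 4 KiB target page size and the example offsets are assumptions chosen for illustration.

/*
 * Standalone sketch (not QEMU code) of the range arithmetic described
 * above, assuming a 4 KiB target page size as on s390x. For an
 * unaligned store that stays within the lowest page, the "second page"
 * computed by the unaligned slow path degenerates to page 0 and the
 * fill size to addr + size, so the old code asked tlb_fill() to cover
 * [0, addr + size) -- a range that includes the low-address-protected
 * bytes even when the store itself does not touch them.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (UINT64_C(1) << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

static void show(uint64_t addr, uint64_t size)
{
    uint64_t page1 = addr & TARGET_PAGE_MASK;
    uint64_t page2 = (addr + size) & TARGET_PAGE_MASK;
    uint64_t size2 = (addr + size) & ~TARGET_PAGE_MASK;

    printf("addr=0x%" PRIx64 " size=%" PRIu64 ": page1=0x%" PRIx64
           " page2=0x%" PRIx64 " size2=0x%" PRIx64 " -> %s\n",
           addr, size, page1, page2, size2,
           page1 == page2 ? "single page, second tlb_fill() skipped"
                          : "crosses pages, second page must be filled");
}

int main(void)
{
    /* Unaligned store to a non-protected lowcore offset: stays on page 0. */
    show(0x201, 8);
    /* Unaligned store that really does cross into the next page. */
    show(0xffc, 8);
    return 0;
}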
@@ -2248,7 +2248,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
     uintptr_t index, index2;
     CPUTLBEntry *entry, *entry2;
-    target_ulong page2, tlb_addr, tlb_addr2;
+    target_ulong page1, page2, tlb_addr, tlb_addr2;
     MemOpIdx oi;
     size_t size2;
     int i;
@@ -2256,15 +2256,17 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
     /*
      * Ensure the second page is in the TLB. Note that the first page
      * is already guaranteed to be filled, and that the second page
-     * cannot evict the first.
+     * cannot evict the first. An exception to this rule is PAGE_WRITE_INV
+     * handling: the first page could have evicted itself.
      */
+    page1 = addr & TARGET_PAGE_MASK;
     page2 = (addr + size) & TARGET_PAGE_MASK;
     size2 = (addr + size) & ~TARGET_PAGE_MASK;
     index2 = tlb_index(env, mmu_idx, page2);
     entry2 = tlb_entry(env, mmu_idx, page2);
 
     tlb_addr2 = tlb_addr_write(entry2);
-    if (!tlb_hit_page(tlb_addr2, page2)) {
+    if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
         if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
             tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                      mmu_idx, retaddr);