target-arm queue:
* memory system updates to support transaction attributes * set user-mode and secure attributes for accesses made by ARM CPUs * rename c1_coproc to cpacr_el1 * adjust id_aa64pfr0 when has_el3 CPU property disabled * allow ARMv8 SCR.SMD updates -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABCAAGBQJVPlJWAAoJEDwlJe0UNgzeX6wP/3VRL+vFcgTugqLtG5MNpWl1 FOjWikAKY6NNjddduK4e7gusy3a9NVnLJnqEfBL+9PYsWyMkDPJKgVzy+4gBl6Yy M5kGlemzLF1Fnx0ORXRLN4MHXDnrr7JIKtscKVMIZHG4YyqkWpd/iVWu+IlB+F2e S10QT+Djv3eAR/hu64CbdgQ+d2EzP5z84t+qIB/BMSklZ+wv/MFctotUb2LY+6xT 9Sj1s1820BJGK1prFg9yb4NKsGcmwbn490bVb5Q6t5otqwns4O1LHlVLRpN5HAXB XReCuyaTBif1sD/iRlBTZGBdbM9p4UEPFwFDA8CEToZlRQemm7y+YZxkNgVLSVX3 SeCUuYz81kklRmC4egKeMs1l4jKmNCKvHhoO/XpEVAwlIcf/Aap/Bm31G1hB5moI Ao0yEd9PXOinOxUSNUtmdM0CfVx7Rmja98Li4/7+GcgsyqOVO2M+dOp5dR5JUumf YbnYxegoxAGCSvXtmGqwPOnFcIpXJ/0a4mjVx40govnYC1KEc97KA+//pFQIy8s8 cDKFYSVdpZ6VJ5M1V2fr4uUPa7phTZQDw8k7UoxzcjfL/ABQErkQG7ABcx+Q1txZ K7pA8LtCxInn+Ah03VPya2BRpb1eC+7ycr7ezb0Vlc7XtxgRqWGesvcWAr9z+Jtm eyr8k+QtnO9ic7Pgpt8M =hDRI -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20150427' into staging target-arm queue: * memory system updates to support transaction attributes * set user-mode and secure attributes for accesses made by ARM CPUs * rename c1_coproc to cpacr_el1 * adjust id_aa64pfr0 when has_el3 CPU property disabled * allow ARMv8 SCR.SMD updates # gpg: Signature made Mon Apr 27 16:14:30 2015 BST using RSA key ID 14360CDE # gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" * remotes/pmaydell/tags/pull-target-arm-20150427: Allow ARMv8 SCR.SMD updates target-arm: Adjust id_aa64pfr0 when has_el3 CPU property disabled target-arm: rename c1_coproc to cpacr_el1 target-arm: Check watchpoints against CPU security state target-arm: Use attribute info to handle user-only watchpoints target-arm: Add user-mode transaction attribute target-arm: Use correct memory attributes for page table walks target-arm: Honour NS bits in page tables Switch non-CPU callers from ld/st*_phys to address_space_ld/st* exec.c: Capture the memory attributes for a watchpoint hit exec.c: Add new address_space_ld*/st* functions exec.c: Make address_space_rw take transaction attributes exec.c: Convert subpage memory ops to _with_attrs Add MemTxAttrs to the IOTLB Make CPU iotlb a structure rather than a plain hwaddr memory: Replace io_mem_read/write with memory_region_dispatch_read/write memory: Define API for MemoryRegionOps to take attrs and return status Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
da378d014d
22
cputlb.c
22
cputlb.c
@ -249,9 +249,9 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
|
||||
* Called from TCG-generated code, which is under an RCU read-side
|
||||
* critical section.
|
||||
*/
|
||||
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
|
||||
hwaddr paddr, int prot,
|
||||
int mmu_idx, target_ulong size)
|
||||
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
|
||||
hwaddr paddr, MemTxAttrs attrs, int prot,
|
||||
int mmu_idx, target_ulong size)
|
||||
{
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
MemoryRegionSection *section;
|
||||
@ -301,7 +301,8 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
|
||||
env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
|
||||
|
||||
/* refill the tlb */
|
||||
env->iotlb[mmu_idx][index] = iotlb - vaddr;
|
||||
env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
|
||||
env->iotlb[mmu_idx][index].attrs = attrs;
|
||||
te->addend = addend - vaddr;
|
||||
if (prot & PAGE_READ) {
|
||||
te->addr_read = address;
|
||||
@ -331,6 +332,17 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
|
||||
}
|
||||
}
|
||||
|
||||
/* Add a new TLB entry, but without specifying the memory
|
||||
* transaction attributes to be used.
|
||||
*/
|
||||
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
|
||||
hwaddr paddr, int prot,
|
||||
int mmu_idx, target_ulong size)
|
||||
{
|
||||
tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
|
||||
prot, mmu_idx, size);
|
||||
}
|
||||
|
||||
/* NOTE: this function can trigger an exception */
|
||||
/* NOTE2: the returned address is not exactly the physical address: it
|
||||
* is actually a ram_addr_t (in system mode; the user mode emulation
|
||||
@ -349,7 +361,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
|
||||
(addr & TARGET_PAGE_MASK))) {
|
||||
cpu_ldub_code(env1, addr);
|
||||
}
|
||||
pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
|
||||
pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
|
||||
mr = iotlb_to_region(cpu, pd);
|
||||
if (memory_region_is_unassigned(mr)) {
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
@ -28,7 +28,8 @@ int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
|
||||
memset(fillbuf, c, FILLBUF_SIZE);
|
||||
while (len > 0) {
|
||||
l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
|
||||
error |= address_space_rw(as, addr, fillbuf, l, true);
|
||||
error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
|
||||
fillbuf, l, true);
|
||||
len -= l;
|
||||
addr += l;
|
||||
}
|
||||
|
436
exec.c
436
exec.c
@ -1858,7 +1858,7 @@ static const MemoryRegionOps notdirty_mem_ops = {
|
||||
};
|
||||
|
||||
/* Generate a debug exception if a watchpoint has been hit. */
|
||||
static void check_watchpoint(int offset, int len, int flags)
|
||||
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
|
||||
{
|
||||
CPUState *cpu = current_cpu;
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
@ -1884,6 +1884,7 @@ static void check_watchpoint(int offset, int len, int flags)
|
||||
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
|
||||
}
|
||||
wp->hitaddr = vaddr;
|
||||
wp->hitattrs = attrs;
|
||||
if (!cpu->watchpoint_hit) {
|
||||
cpu->watchpoint_hit = wp;
|
||||
tb_check_watchpoint(cpu);
|
||||
@ -1905,69 +1906,93 @@ static void check_watchpoint(int offset, int len, int flags)
|
||||
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
|
||||
so these check for a hit then pass through to the normal out-of-line
|
||||
phys routines. */
|
||||
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
|
||||
unsigned size)
|
||||
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
|
||||
unsigned size, MemTxAttrs attrs)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
|
||||
switch (size) {
|
||||
case 1: return ldub_phys(&address_space_memory, addr);
|
||||
case 2: return lduw_phys(&address_space_memory, addr);
|
||||
case 4: return ldl_phys(&address_space_memory, addr);
|
||||
default: abort();
|
||||
}
|
||||
}
|
||||
MemTxResult res;
|
||||
uint64_t data;
|
||||
|
||||
static void watch_mem_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
|
||||
switch (size) {
|
||||
case 1:
|
||||
stb_phys(&address_space_memory, addr, val);
|
||||
data = address_space_ldub(&address_space_memory, addr, attrs, &res);
|
||||
break;
|
||||
case 2:
|
||||
stw_phys(&address_space_memory, addr, val);
|
||||
data = address_space_lduw(&address_space_memory, addr, attrs, &res);
|
||||
break;
|
||||
case 4:
|
||||
stl_phys(&address_space_memory, addr, val);
|
||||
data = address_space_ldl(&address_space_memory, addr, attrs, &res);
|
||||
break;
|
||||
default: abort();
|
||||
}
|
||||
*pdata = data;
|
||||
return res;
|
||||
}
|
||||
|
||||
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
MemTxResult res;
|
||||
|
||||
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
|
||||
switch (size) {
|
||||
case 1:
|
||||
address_space_stb(&address_space_memory, addr, val, attrs, &res);
|
||||
break;
|
||||
case 2:
|
||||
address_space_stw(&address_space_memory, addr, val, attrs, &res);
|
||||
break;
|
||||
case 4:
|
||||
address_space_stl(&address_space_memory, addr, val, attrs, &res);
|
||||
break;
|
||||
default: abort();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
static const MemoryRegionOps watch_mem_ops = {
|
||||
.read = watch_mem_read,
|
||||
.write = watch_mem_write,
|
||||
.read_with_attrs = watch_mem_read,
|
||||
.write_with_attrs = watch_mem_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
};
|
||||
|
||||
static uint64_t subpage_read(void *opaque, hwaddr addr,
|
||||
unsigned len)
|
||||
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
|
||||
unsigned len, MemTxAttrs attrs)
|
||||
{
|
||||
subpage_t *subpage = opaque;
|
||||
uint8_t buf[8];
|
||||
MemTxResult res;
|
||||
|
||||
#if defined(DEBUG_SUBPAGE)
|
||||
printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
|
||||
subpage, len, addr);
|
||||
#endif
|
||||
address_space_read(subpage->as, addr + subpage->base, buf, len);
|
||||
res = address_space_read(subpage->as, addr + subpage->base,
|
||||
attrs, buf, len);
|
||||
if (res) {
|
||||
return res;
|
||||
}
|
||||
switch (len) {
|
||||
case 1:
|
||||
return ldub_p(buf);
|
||||
*data = ldub_p(buf);
|
||||
return MEMTX_OK;
|
||||
case 2:
|
||||
return lduw_p(buf);
|
||||
*data = lduw_p(buf);
|
||||
return MEMTX_OK;
|
||||
case 4:
|
||||
return ldl_p(buf);
|
||||
*data = ldl_p(buf);
|
||||
return MEMTX_OK;
|
||||
case 8:
|
||||
return ldq_p(buf);
|
||||
*data = ldq_p(buf);
|
||||
return MEMTX_OK;
|
||||
default:
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void subpage_write(void *opaque, hwaddr addr,
|
||||
uint64_t value, unsigned len)
|
||||
static MemTxResult subpage_write(void *opaque, hwaddr addr,
|
||||
uint64_t value, unsigned len, MemTxAttrs attrs)
|
||||
{
|
||||
subpage_t *subpage = opaque;
|
||||
uint8_t buf[8];
|
||||
@ -1993,7 +2018,8 @@ static void subpage_write(void *opaque, hwaddr addr,
|
||||
default:
|
||||
abort();
|
||||
}
|
||||
address_space_write(subpage->as, addr + subpage->base, buf, len);
|
||||
return address_space_write(subpage->as, addr + subpage->base,
|
||||
attrs, buf, len);
|
||||
}
|
||||
|
||||
static bool subpage_accepts(void *opaque, hwaddr addr,
|
||||
@ -2010,8 +2036,8 @@ static bool subpage_accepts(void *opaque, hwaddr addr,
|
||||
}
|
||||
|
||||
static const MemoryRegionOps subpage_ops = {
|
||||
.read = subpage_read,
|
||||
.write = subpage_write,
|
||||
.read_with_attrs = subpage_read,
|
||||
.write_with_attrs = subpage_write,
|
||||
.impl.min_access_size = 1,
|
||||
.impl.max_access_size = 8,
|
||||
.valid.min_access_size = 1,
|
||||
@ -2304,15 +2330,15 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
|
||||
return l;
|
||||
}
|
||||
|
||||
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
|
||||
int len, bool is_write)
|
||||
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
|
||||
uint8_t *buf, int len, bool is_write)
|
||||
{
|
||||
hwaddr l;
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
hwaddr addr1;
|
||||
MemoryRegion *mr;
|
||||
bool error = false;
|
||||
MemTxResult result = MEMTX_OK;
|
||||
|
||||
while (len > 0) {
|
||||
l = len;
|
||||
@ -2327,22 +2353,26 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
|
||||
case 8:
|
||||
/* 64 bit write access */
|
||||
val = ldq_p(buf);
|
||||
error |= io_mem_write(mr, addr1, val, 8);
|
||||
result |= memory_region_dispatch_write(mr, addr1, val, 8,
|
||||
attrs);
|
||||
break;
|
||||
case 4:
|
||||
/* 32 bit write access */
|
||||
val = ldl_p(buf);
|
||||
error |= io_mem_write(mr, addr1, val, 4);
|
||||
result |= memory_region_dispatch_write(mr, addr1, val, 4,
|
||||
attrs);
|
||||
break;
|
||||
case 2:
|
||||
/* 16 bit write access */
|
||||
val = lduw_p(buf);
|
||||
error |= io_mem_write(mr, addr1, val, 2);
|
||||
result |= memory_region_dispatch_write(mr, addr1, val, 2,
|
||||
attrs);
|
||||
break;
|
||||
case 1:
|
||||
/* 8 bit write access */
|
||||
val = ldub_p(buf);
|
||||
error |= io_mem_write(mr, addr1, val, 1);
|
||||
result |= memory_region_dispatch_write(mr, addr1, val, 1,
|
||||
attrs);
|
||||
break;
|
||||
default:
|
||||
abort();
|
||||
@ -2361,22 +2391,26 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
|
||||
switch (l) {
|
||||
case 8:
|
||||
/* 64 bit read access */
|
||||
error |= io_mem_read(mr, addr1, &val, 8);
|
||||
result |= memory_region_dispatch_read(mr, addr1, &val, 8,
|
||||
attrs);
|
||||
stq_p(buf, val);
|
||||
break;
|
||||
case 4:
|
||||
/* 32 bit read access */
|
||||
error |= io_mem_read(mr, addr1, &val, 4);
|
||||
result |= memory_region_dispatch_read(mr, addr1, &val, 4,
|
||||
attrs);
|
||||
stl_p(buf, val);
|
||||
break;
|
||||
case 2:
|
||||
/* 16 bit read access */
|
||||
error |= io_mem_read(mr, addr1, &val, 2);
|
||||
result |= memory_region_dispatch_read(mr, addr1, &val, 2,
|
||||
attrs);
|
||||
stw_p(buf, val);
|
||||
break;
|
||||
case 1:
|
||||
/* 8 bit read access */
|
||||
error |= io_mem_read(mr, addr1, &val, 1);
|
||||
result |= memory_region_dispatch_read(mr, addr1, &val, 1,
|
||||
attrs);
|
||||
stb_p(buf, val);
|
||||
break;
|
||||
default:
|
||||
@ -2393,25 +2427,27 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
|
||||
addr += l;
|
||||
}
|
||||
|
||||
return error;
|
||||
return result;
|
||||
}
|
||||
|
||||
bool address_space_write(AddressSpace *as, hwaddr addr,
|
||||
const uint8_t *buf, int len)
|
||||
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
|
||||
const uint8_t *buf, int len)
|
||||
{
|
||||
return address_space_rw(as, addr, (uint8_t *)buf, len, true);
|
||||
return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
|
||||
}
|
||||
|
||||
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
|
||||
MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
|
||||
uint8_t *buf, int len)
|
||||
{
|
||||
return address_space_rw(as, addr, buf, len, false);
|
||||
return address_space_rw(as, addr, attrs, buf, len, false);
|
||||
}
|
||||
|
||||
|
||||
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
|
||||
int len, int is_write)
|
||||
{
|
||||
address_space_rw(&address_space_memory, addr, buf, len, is_write);
|
||||
address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
|
||||
buf, len, is_write);
|
||||
}
|
||||
|
||||
enum write_rom_type {
|
||||
@ -2582,7 +2618,8 @@ void *address_space_map(AddressSpace *as,
|
||||
memory_region_ref(mr);
|
||||
bounce.mr = mr;
|
||||
if (!is_write) {
|
||||
address_space_read(as, addr, bounce.buffer, l);
|
||||
address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
|
||||
bounce.buffer, l);
|
||||
}
|
||||
|
||||
*plen = l;
|
||||
@ -2635,7 +2672,8 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
|
||||
return;
|
||||
}
|
||||
if (is_write) {
|
||||
address_space_write(as, bounce.addr, bounce.buffer, access_len);
|
||||
address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
|
||||
bounce.buffer, access_len);
|
||||
}
|
||||
qemu_vfree(bounce.buffer);
|
||||
bounce.buffer = NULL;
|
||||
@ -2657,19 +2695,22 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
|
||||
enum device_endian endian)
|
||||
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 4;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
|
||||
mr = address_space_translate(as, addr, &addr1, &l, false);
|
||||
if (l < 4 || !memory_access_is_direct(mr, false)) {
|
||||
/* I/O case */
|
||||
io_mem_read(mr, addr1, &val, 4);
|
||||
r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap32(val);
|
||||
@ -2695,40 +2736,68 @@ static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
|
||||
val = ldl_p(ptr);
|
||||
break;
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldl_internal(as, addr, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldl_internal(as, addr, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldl_internal(as, addr, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
|
||||
return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
|
||||
return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
|
||||
return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
|
||||
enum device_endian endian)
|
||||
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 8;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
false);
|
||||
if (l < 8 || !memory_access_is_direct(mr, false)) {
|
||||
/* I/O case */
|
||||
io_mem_read(mr, addr1, &val, 8);
|
||||
r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap64(val);
|
||||
@ -2754,48 +2823,88 @@ static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
|
||||
val = ldq_p(ptr);
|
||||
break;
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldq_internal(as, addr, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldq_internal(as, addr, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_ldq_internal(as, addr, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
|
||||
return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
|
||||
return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
|
||||
return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
|
||||
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
uint8_t val;
|
||||
address_space_rw(as, addr, &val, 1, 0);
|
||||
MemTxResult r;
|
||||
|
||||
r = address_space_rw(as, addr, attrs, &val, 1, 0);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
|
||||
enum device_endian endian)
|
||||
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
|
||||
hwaddr addr,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
uint64_t val;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 2;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
false);
|
||||
if (l < 2 || !memory_access_is_direct(mr, false)) {
|
||||
/* I/O case */
|
||||
io_mem_read(mr, addr1, &val, 2);
|
||||
r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
|
||||
#if defined(TARGET_WORDS_BIGENDIAN)
|
||||
if (endian == DEVICE_LITTLE_ENDIAN) {
|
||||
val = bswap16(val);
|
||||
@ -2821,39 +2930,66 @@ static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
|
||||
val = lduw_p(ptr);
|
||||
break;
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_lduw_internal(as, addr, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_lduw_internal(as, addr, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
return address_space_lduw_internal(as, addr, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
|
||||
return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
|
||||
return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
|
||||
{
|
||||
return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
|
||||
return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned. The ram page is not masked as dirty
|
||||
and the code inside is not invalidated. It is useful if the dirty
|
||||
bits are used to track modified PTEs */
|
||||
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 4;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
true);
|
||||
if (l < 4 || !memory_access_is_direct(mr, true)) {
|
||||
io_mem_write(mr, addr1, val, 4);
|
||||
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
|
||||
} else {
|
||||
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
|
||||
ptr = qemu_get_ram_ptr(addr1);
|
||||
@ -2867,18 +3003,30 @@ void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
|
||||
}
|
||||
}
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline void stl_phys_internal(AddressSpace *as,
|
||||
hwaddr addr, uint32_t val,
|
||||
enum device_endian endian)
|
||||
static inline void address_space_stl_internal(AddressSpace *as,
|
||||
hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 4;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
|
||||
mr = address_space_translate(as, addr, &addr1, &l,
|
||||
true);
|
||||
@ -2892,7 +3040,7 @@ static inline void stl_phys_internal(AddressSpace *as,
|
||||
val = bswap32(val);
|
||||
}
|
||||
#endif
|
||||
io_mem_write(mr, addr1, val, 4);
|
||||
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
|
||||
} else {
|
||||
/* RAM case */
|
||||
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
|
||||
@ -2909,40 +3057,79 @@ static inline void stl_phys_internal(AddressSpace *as,
|
||||
break;
|
||||
}
|
||||
invalidate_and_set_dirty(addr1, 4);
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stl_internal(as, addr, val, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stl_internal(as, addr, val, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stl_internal(as, addr, val, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
|
||||
address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
|
||||
address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
|
||||
address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
uint8_t v = val;
|
||||
address_space_rw(as, addr, &v, 1, 1);
|
||||
MemTxResult r;
|
||||
|
||||
r = address_space_rw(as, addr, attrs, &v, 1, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* warning: addr must be aligned */
|
||||
static inline void stw_phys_internal(AddressSpace *as,
|
||||
hwaddr addr, uint32_t val,
|
||||
enum device_endian endian)
|
||||
static inline void address_space_stw_internal(AddressSpace *as,
|
||||
hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs,
|
||||
MemTxResult *result,
|
||||
enum device_endian endian)
|
||||
{
|
||||
uint8_t *ptr;
|
||||
MemoryRegion *mr;
|
||||
hwaddr l = 2;
|
||||
hwaddr addr1;
|
||||
MemTxResult r;
|
||||
|
||||
mr = address_space_translate(as, addr, &addr1, &l, true);
|
||||
if (l < 2 || !memory_access_is_direct(mr, true)) {
|
||||
@ -2955,7 +3142,7 @@ static inline void stw_phys_internal(AddressSpace *as,
|
||||
val = bswap16(val);
|
||||
}
|
||||
#endif
|
||||
io_mem_write(mr, addr1, val, 2);
|
||||
r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
|
||||
} else {
|
||||
/* RAM case */
|
||||
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
|
||||
@ -2972,41 +3159,95 @@ static inline void stw_phys_internal(AddressSpace *as,
|
||||
break;
|
||||
}
|
||||
invalidate_and_set_dirty(addr1, 2);
|
||||
r = MEMTX_OK;
|
||||
}
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stw_internal(as, addr, val, attrs, result,
|
||||
DEVICE_NATIVE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stw_internal(as, addr, val, attrs, result,
|
||||
DEVICE_LITTLE_ENDIAN);
|
||||
}
|
||||
|
||||
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
address_space_stw_internal(as, addr, val, attrs, result,
|
||||
DEVICE_BIG_ENDIAN);
|
||||
}
|
||||
|
||||
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
|
||||
address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
|
||||
address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
|
||||
{
|
||||
stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
|
||||
address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* XXX: optimize */
|
||||
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
MemTxResult r;
|
||||
val = tswap64(val);
|
||||
r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
MemTxResult r;
|
||||
val = cpu_to_le64(val);
|
||||
r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
|
||||
MemTxAttrs attrs, MemTxResult *result)
|
||||
{
|
||||
MemTxResult r;
|
||||
val = cpu_to_be64(val);
|
||||
r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
|
||||
if (result) {
|
||||
*result = r;
|
||||
}
|
||||
}
|
||||
|
||||
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
|
||||
{
|
||||
val = tswap64(val);
|
||||
address_space_rw(as, addr, (void *) &val, 8, 1);
|
||||
address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
|
||||
{
|
||||
val = cpu_to_le64(val);
|
||||
address_space_rw(as, addr, (void *) &val, 8, 1);
|
||||
address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
|
||||
{
|
||||
val = cpu_to_be64(val);
|
||||
address_space_rw(as, addr, (void *) &val, 8, 1);
|
||||
address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* virtual memory access for debug (includes writing to ROM) */
|
||||
@ -3030,7 +3271,8 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
|
||||
if (is_write) {
|
||||
cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
|
||||
} else {
|
||||
address_space_rw(cpu->as, phys_addr, buf, l, 0);
|
||||
address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
|
||||
buf, l, 0);
|
||||
}
|
||||
len -= l;
|
||||
buf += l;
|
||||
|
@ -157,9 +157,12 @@ static void clipper_init(MachineState *machine)
|
||||
load_image_targphys(initrd_filename, initrd_base,
|
||||
ram_size - initrd_base);
|
||||
|
||||
stq_phys(&address_space_memory,
|
||||
param_offset + 0x100, initrd_base + 0xfffffc0000000000ULL);
|
||||
stq_phys(&address_space_memory, param_offset + 0x108, initrd_size);
|
||||
address_space_stq(&address_space_memory, param_offset + 0x100,
|
||||
initrd_base + 0xfffffc0000000000ULL,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
address_space_stq(&address_space_memory, param_offset + 0x108,
|
||||
initrd_size, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -613,7 +613,8 @@ static bool make_iommu_tlbe(hwaddr taddr, hwaddr mask, IOMMUTLBEntry *ret)
|
||||
translation, given the address of the PTE. */
|
||||
static bool pte_translate(hwaddr pte_addr, IOMMUTLBEntry *ret)
|
||||
{
|
||||
uint64_t pte = ldq_phys(&address_space_memory, pte_addr);
|
||||
uint64_t pte = address_space_ldq(&address_space_memory, pte_addr,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
|
||||
/* Check valid bit. */
|
||||
if ((pte & 1) == 0) {
|
||||
|
@ -170,7 +170,8 @@ static void default_reset_secondary(ARMCPU *cpu,
|
||||
{
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
stl_phys_notdirty(&address_space_memory, info->smp_bootreg_addr, 0);
|
||||
address_space_stl_notdirty(&address_space_memory, info->smp_bootreg_addr,
|
||||
0, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
env->regs[15] = info->smp_loader_start;
|
||||
}
|
||||
|
||||
@ -180,7 +181,8 @@ static inline bool have_dtb(const struct arm_boot_info *info)
|
||||
}
|
||||
|
||||
#define WRITE_WORD(p, value) do { \
|
||||
stl_phys_notdirty(&address_space_memory, p, value); \
|
||||
address_space_stl_notdirty(&address_space_memory, p, value, \
|
||||
MEMTXATTRS_UNSPECIFIED, NULL); \
|
||||
p += 4; \
|
||||
} while (0)
|
||||
|
||||
|
@ -69,11 +69,17 @@ static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
|
||||
|
||||
switch (info->nb_cpus) {
|
||||
case 4:
|
||||
stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x30, 0);
|
||||
address_space_stl_notdirty(&address_space_memory,
|
||||
SMP_BOOT_REG + 0x30, 0,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
case 3:
|
||||
stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x20, 0);
|
||||
address_space_stl_notdirty(&address_space_memory,
|
||||
SMP_BOOT_REG + 0x20, 0,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
case 2:
|
||||
stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x10, 0);
|
||||
address_space_stl_notdirty(&address_space_memory,
|
||||
SMP_BOOT_REG + 0x10, 0,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
env->regs[15] = SMP_BOOT_ADDR;
|
||||
break;
|
||||
default:
|
||||
|
@ -274,7 +274,7 @@ static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC;
|
||||
s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I;
|
||||
s->cpu->env.cp15.sctlr_ns = 0;
|
||||
s->cpu->env.cp15.c1_coproc = 0;
|
||||
s->cpu->env.cp15.cpacr_el1 = 0;
|
||||
s->cpu->env.cp15.ttbr0_el[1] = 0;
|
||||
s->cpu->env.cp15.dacr_ns = 0;
|
||||
s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */
|
||||
|
@ -205,10 +205,22 @@ again:
|
||||
if (size == 0) {
|
||||
/* Transfer complete. */
|
||||
if (ch->lli) {
|
||||
ch->src = ldl_le_phys(&address_space_memory, ch->lli);
|
||||
ch->dest = ldl_le_phys(&address_space_memory, ch->lli + 4);
|
||||
ch->ctrl = ldl_le_phys(&address_space_memory, ch->lli + 12);
|
||||
ch->lli = ldl_le_phys(&address_space_memory, ch->lli + 8);
|
||||
ch->src = address_space_ldl_le(&address_space_memory,
|
||||
ch->lli,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
ch->dest = address_space_ldl_le(&address_space_memory,
|
||||
ch->lli + 4,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
ch->ctrl = address_space_ldl_le(&address_space_memory,
|
||||
ch->lli + 12,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
ch->lli = address_space_ldl_le(&address_space_memory,
|
||||
ch->lli + 8,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
} else {
|
||||
ch->conf &= ~PL080_CCONF_E;
|
||||
}
|
||||
|
@ -263,7 +263,8 @@ static uint32_t iommu_page_get_flags(IOMMUState *s, hwaddr addr)
|
||||
iopte = s->regs[IOMMU_BASE] << 4;
|
||||
addr &= ~s->iostart;
|
||||
iopte += (addr >> (IOMMU_PAGE_SHIFT - 2)) & ~3;
|
||||
ret = ldl_be_phys(&address_space_memory, iopte);
|
||||
ret = address_space_ldl_be(&address_space_memory, iopte,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
trace_sun4m_iommu_page_get_flags(pa, iopte, ret);
|
||||
return ret;
|
||||
}
|
||||
|
@ -246,7 +246,8 @@ static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
|
||||
data = vtd_get_long_raw(s, mesg_data_reg);
|
||||
|
||||
VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data);
|
||||
stl_le_phys(&address_space_memory, addr, data);
|
||||
address_space_stl_le(&address_space_memory, addr, data,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* Generate a fault event to software via MSI if conditions are met.
|
||||
|
@ -61,7 +61,8 @@ static void main_cpu_reset(void *opaque)
|
||||
static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
uint8_t val;
|
||||
address_space_read(&address_space_memory, 0x90000071, &val, 1);
|
||||
address_space_read(&address_space_memory, 0x90000071,
|
||||
MEMTXATTRS_UNSPECIFIED, &val, 1);
|
||||
return val;
|
||||
}
|
||||
|
||||
@ -69,7 +70,8 @@ static void rtc_write(void *opaque, hwaddr addr,
|
||||
uint64_t val, unsigned size)
|
||||
{
|
||||
uint8_t buf = val & 0xff;
|
||||
address_space_write(&address_space_memory, 0x90000071, &buf, 1);
|
||||
address_space_write(&address_space_memory, 0x90000071,
|
||||
MEMTXATTRS_UNSPECIFIED, &buf, 1);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps rtc_ops = {
|
||||
|
@ -289,7 +289,8 @@ static IOMMUTLBEntry pbm_translate_iommu(MemoryRegion *iommu, hwaddr addr,
|
||||
}
|
||||
}
|
||||
|
||||
tte = ldq_be_phys(&address_space_memory, baseaddr + offset);
|
||||
tte = address_space_ldq_be(&address_space_memory, baseaddr + offset,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
|
||||
if (!(tte & IOMMU_TTE_DATA_V)) {
|
||||
/* Invalid mapping */
|
||||
|
@ -140,7 +140,8 @@ static uint64_t raven_io_read(void *opaque, hwaddr addr,
|
||||
uint8_t buf[4];
|
||||
|
||||
addr = raven_io_address(s, addr);
|
||||
address_space_read(&s->pci_io_as, addr + 0x80000000, buf, size);
|
||||
address_space_read(&s->pci_io_as, addr + 0x80000000,
|
||||
MEMTXATTRS_UNSPECIFIED, buf, size);
|
||||
|
||||
if (size == 1) {
|
||||
return buf[0];
|
||||
@ -171,7 +172,8 @@ static void raven_io_write(void *opaque, hwaddr addr,
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
address_space_write(&s->pci_io_as, addr + 0x80000000, buf, size);
|
||||
address_space_write(&s->pci_io_as, addr + 0x80000000,
|
||||
MEMTXATTRS_UNSPECIFIED, buf, size);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps raven_io_ops = {
|
||||
|
@ -291,7 +291,8 @@ void msi_notify(PCIDevice *dev, unsigned int vector)
|
||||
"notify vector 0x%x"
|
||||
" address: 0x%"PRIx64" data: 0x%"PRIx32"\n",
|
||||
vector, msg.address, msg.data);
|
||||
stl_le_phys(&dev->bus_master_as, msg.address, msg.data);
|
||||
address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
/* Normally called by pci_default_write_config(). */
|
||||
|
@ -435,7 +435,8 @@ void msix_notify(PCIDevice *dev, unsigned vector)
|
||||
|
||||
msg = msix_get_message(dev, vector);
|
||||
|
||||
stl_le_phys(&dev->bus_master_as, msg.address, msg.data);
|
||||
address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
void msix_reset(PCIDevice *dev)
|
||||
|
@ -745,20 +745,27 @@ static void css_update_chnmon(SubchDev *sch)
|
||||
/* Format 1, per-subchannel area. */
|
||||
uint32_t count;
|
||||
|
||||
count = ldl_phys(&address_space_memory, sch->curr_status.mba);
|
||||
count = address_space_ldl(&address_space_memory,
|
||||
sch->curr_status.mba,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
count++;
|
||||
stl_phys(&address_space_memory, sch->curr_status.mba, count);
|
||||
address_space_stl(&address_space_memory, sch->curr_status.mba, count,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
} else {
|
||||
/* Format 0, global area. */
|
||||
uint32_t offset;
|
||||
uint16_t count;
|
||||
|
||||
offset = sch->curr_status.pmcw.mbi << 5;
|
||||
count = lduw_phys(&address_space_memory,
|
||||
channel_subsys->chnmon_area + offset);
|
||||
count = address_space_lduw(&address_space_memory,
|
||||
channel_subsys->chnmon_area + offset,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
count++;
|
||||
stw_phys(&address_space_memory,
|
||||
channel_subsys->chnmon_area + offset, count);
|
||||
address_space_stw(&address_space_memory,
|
||||
channel_subsys->chnmon_area + offset, count,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -278,7 +278,8 @@ static uint64_t s390_guest_io_table_walk(uint64_t guest_iota,
|
||||
px = calc_px(guest_dma_address);
|
||||
|
||||
sto_a = guest_iota + rtx * sizeof(uint64_t);
|
||||
sto = ldq_phys(&address_space_memory, sto_a);
|
||||
sto = address_space_ldq(&address_space_memory, sto_a,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
sto = get_rt_sto(sto);
|
||||
if (!sto) {
|
||||
pte = 0;
|
||||
@ -286,7 +287,8 @@ static uint64_t s390_guest_io_table_walk(uint64_t guest_iota,
|
||||
}
|
||||
|
||||
pto_a = sto + sx * sizeof(uint64_t);
|
||||
pto = ldq_phys(&address_space_memory, pto_a);
|
||||
pto = address_space_ldq(&address_space_memory, pto_a,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
pto = get_st_pto(pto);
|
||||
if (!pto) {
|
||||
pte = 0;
|
||||
@ -294,7 +296,8 @@ static uint64_t s390_guest_io_table_walk(uint64_t guest_iota,
|
||||
}
|
||||
|
||||
px_a = pto + px * sizeof(uint64_t);
|
||||
pte = ldq_phys(&address_space_memory, px_a);
|
||||
pte = address_space_ldq(&address_space_memory, px_a,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
|
||||
out:
|
||||
return pte;
|
||||
|
@ -331,7 +331,8 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
|
||||
return 0;
|
||||
}
|
||||
MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
|
||||
io_mem_read(mr, offset, &data, len);
|
||||
memory_region_dispatch_read(mr, offset, &data, len,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
} else if (pcias == 15) {
|
||||
if ((4 - (offset & 0x3)) < len) {
|
||||
program_interrupt(env, PGM_OPERAND, 4);
|
||||
@ -456,7 +457,8 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
|
||||
mr = pbdev->pdev->io_regions[pcias].memory;
|
||||
}
|
||||
|
||||
io_mem_write(mr, offset, data, len);
|
||||
memory_region_dispatch_write(mr, offset, data, len,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
} else if (pcias == 15) {
|
||||
if ((4 - (offset & 0x3)) < len) {
|
||||
program_interrupt(env, PGM_OPERAND, 4);
|
||||
@ -606,7 +608,9 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
|
||||
}
|
||||
|
||||
for (i = 0; i < len / 8; i++) {
|
||||
io_mem_write(mr, env->regs[r3] + i * 8, ldq_p(buffer + i * 8), 8);
|
||||
memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
|
||||
ldq_p(buffer + i * 8), 8,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
}
|
||||
|
||||
setcc(cpu, ZPCI_PCI_LS_OK);
|
||||
|
@ -75,10 +75,12 @@ void s390_virtio_reset_idx(VirtIOS390Device *dev)
|
||||
for (i = 0; i < num_vq; i++) {
|
||||
idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) +
|
||||
VIRTIO_VRING_AVAIL_IDX_OFFS;
|
||||
stw_phys(&address_space_memory, idx_addr, 0);
|
||||
address_space_stw(&address_space_memory, idx_addr, 0,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
idx_addr = virtio_queue_get_used_addr(dev->vdev, i) +
|
||||
VIRTIO_VRING_USED_IDX_OFFS;
|
||||
stw_phys(&address_space_memory, idx_addr, 0);
|
||||
address_space_stw(&address_space_memory, idx_addr, 0,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@ -336,7 +338,8 @@ static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq)
|
||||
(vq * VIRTIO_VQCONFIG_LEN) +
|
||||
VIRTIO_VQCONFIG_OFFS_TOKEN;
|
||||
|
||||
return ldq_be_phys(&address_space_memory, token_off);
|
||||
return address_space_ldq_be(&address_space_memory, token_off,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
}
|
||||
|
||||
static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev)
|
||||
@ -371,21 +374,33 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
|
||||
virtio_reset(dev->vdev);
|
||||
|
||||
/* Sync dev space */
|
||||
stb_phys(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, dev->vdev->device_id);
|
||||
address_space_stb(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_TYPE,
|
||||
dev->vdev->device_id,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
|
||||
stb_phys(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ,
|
||||
s390_virtio_device_num_vq(dev));
|
||||
stb_phys(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, dev->feat_len);
|
||||
address_space_stb(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ,
|
||||
s390_virtio_device_num_vq(dev),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
address_space_stb(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN,
|
||||
dev->feat_len,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
|
||||
stb_phys(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, dev->vdev->config_len);
|
||||
address_space_stb(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN,
|
||||
dev->vdev->config_len,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
|
||||
num_vq = s390_virtio_device_num_vq(dev);
|
||||
stb_phys(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq);
|
||||
address_space_stb(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
|
||||
/* Sync virtqueues */
|
||||
for (i = 0; i < num_vq; i++) {
|
||||
@ -396,11 +411,14 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
|
||||
vring = s390_virtio_next_ring(bus);
|
||||
virtio_queue_set_addr(dev->vdev, i, vring);
|
||||
virtio_queue_set_vector(dev->vdev, i, i);
|
||||
stq_be_phys(&address_space_memory,
|
||||
vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring);
|
||||
stw_be_phys(&address_space_memory,
|
||||
vq + VIRTIO_VQCONFIG_OFFS_NUM,
|
||||
virtio_queue_get_num(dev->vdev, i));
|
||||
address_space_stq_be(&address_space_memory,
|
||||
vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
address_space_stw_be(&address_space_memory,
|
||||
vq + VIRTIO_VQCONFIG_OFFS_NUM,
|
||||
virtio_queue_get_num(dev->vdev, i),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
}
|
||||
|
||||
cur_offs = dev->dev_offs;
|
||||
@ -408,7 +426,8 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
|
||||
cur_offs += num_vq * VIRTIO_VQCONFIG_LEN;
|
||||
|
||||
/* Sync feature bitmap */
|
||||
stl_le_phys(&address_space_memory, cur_offs, dev->host_features);
|
||||
address_space_stl_le(&address_space_memory, cur_offs, dev->host_features,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
|
||||
dev->feat_offs = cur_offs + dev->feat_len;
|
||||
cur_offs += dev->feat_len * 2;
|
||||
@ -426,12 +445,16 @@ void s390_virtio_device_update_status(VirtIOS390Device *dev)
|
||||
VirtIODevice *vdev = dev->vdev;
|
||||
uint32_t features;
|
||||
|
||||
virtio_set_status(vdev, ldub_phys(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_STATUS));
|
||||
virtio_set_status(vdev,
|
||||
address_space_ldub(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_STATUS,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL));
|
||||
|
||||
/* Update guest supported feature bitmap */
|
||||
|
||||
features = bswap32(ldl_be_phys(&address_space_memory, dev->feat_offs));
|
||||
features = bswap32(address_space_ldl_be(&address_space_memory,
|
||||
dev->feat_offs,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL));
|
||||
virtio_set_features(vdev, features);
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,9 @@ static int s390_virtio_hcall_reset(const uint64_t *args)
|
||||
return -EINVAL;
|
||||
}
|
||||
virtio_reset(dev->vdev);
|
||||
stb_phys(&address_space_memory, dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0);
|
||||
address_space_stb(&address_space_memory,
|
||||
dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
s390_virtio_device_sync(dev);
|
||||
s390_virtio_reset_idx(dev);
|
||||
|
||||
|
@ -335,16 +335,23 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
info.queue = ldq_phys(&address_space_memory, ccw.cda);
|
||||
info.align = ldl_phys(&address_space_memory,
|
||||
ccw.cda + sizeof(info.queue));
|
||||
info.index = lduw_phys(&address_space_memory,
|
||||
ccw.cda + sizeof(info.queue)
|
||||
+ sizeof(info.align));
|
||||
info.num = lduw_phys(&address_space_memory,
|
||||
ccw.cda + sizeof(info.queue)
|
||||
+ sizeof(info.align)
|
||||
+ sizeof(info.index));
|
||||
info.queue = address_space_ldq(&address_space_memory, ccw.cda,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
info.align = address_space_ldl(&address_space_memory,
|
||||
ccw.cda + sizeof(info.queue),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
info.index = address_space_lduw(&address_space_memory,
|
||||
ccw.cda + sizeof(info.queue)
|
||||
+ sizeof(info.align),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
info.num = address_space_lduw(&address_space_memory,
|
||||
ccw.cda + sizeof(info.queue)
|
||||
+ sizeof(info.align)
|
||||
+ sizeof(info.index),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
|
||||
info.num);
|
||||
sch->curr_status.scsw.count = 0;
|
||||
@ -369,15 +376,20 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
features.index = ldub_phys(&address_space_memory,
|
||||
ccw.cda + sizeof(features.features));
|
||||
features.index = address_space_ldub(&address_space_memory,
|
||||
ccw.cda
|
||||
+ sizeof(features.features),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
if (features.index < ARRAY_SIZE(dev->host_features)) {
|
||||
features.features = dev->host_features[features.index];
|
||||
} else {
|
||||
/* Return zeroes if the guest supports more feature bits. */
|
||||
features.features = 0;
|
||||
}
|
||||
stl_le_phys(&address_space_memory, ccw.cda, features.features);
|
||||
address_space_stl_le(&address_space_memory, ccw.cda,
|
||||
features.features, MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
sch->curr_status.scsw.count = ccw.count - sizeof(features);
|
||||
ret = 0;
|
||||
}
|
||||
@ -396,9 +408,15 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
features.index = ldub_phys(&address_space_memory,
|
||||
ccw.cda + sizeof(features.features));
|
||||
features.features = ldl_le_phys(&address_space_memory, ccw.cda);
|
||||
features.index = address_space_ldub(&address_space_memory,
|
||||
ccw.cda
|
||||
+ sizeof(features.features),
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
features.features = address_space_ldl_le(&address_space_memory,
|
||||
ccw.cda,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
if (features.index < ARRAY_SIZE(dev->host_features)) {
|
||||
virtio_set_features(vdev, features.features);
|
||||
} else {
|
||||
@ -474,7 +492,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
status = ldub_phys(&address_space_memory, ccw.cda);
|
||||
status = address_space_ldub(&address_space_memory, ccw.cda,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
|
||||
virtio_ccw_stop_ioeventfd(dev);
|
||||
}
|
||||
@ -508,7 +527,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
indicators = ldq_be_phys(&address_space_memory, ccw.cda);
|
||||
indicators = address_space_ldq_be(&address_space_memory, ccw.cda,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
dev->indicators = get_indicator(indicators, sizeof(uint64_t));
|
||||
sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
|
||||
ret = 0;
|
||||
@ -528,7 +548,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
indicators = ldq_be_phys(&address_space_memory, ccw.cda);
|
||||
indicators = address_space_ldq_be(&address_space_memory, ccw.cda,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
|
||||
sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
|
||||
ret = 0;
|
||||
@ -548,15 +569,21 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
|
||||
if (!ccw.cda) {
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
vq_config.index = lduw_be_phys(&address_space_memory, ccw.cda);
|
||||
vq_config.index = address_space_lduw_be(&address_space_memory,
|
||||
ccw.cda,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
if (vq_config.index >= VIRTIO_PCI_QUEUE_MAX) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
vq_config.num_max = virtio_queue_get_num(vdev,
|
||||
vq_config.index);
|
||||
stw_be_phys(&address_space_memory,
|
||||
ccw.cda + sizeof(vq_config.index), vq_config.num_max);
|
||||
address_space_stw_be(&address_space_memory,
|
||||
ccw.cda + sizeof(vq_config.index),
|
||||
vq_config.num_max,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
|
||||
ret = 0;
|
||||
}
|
||||
@ -1068,9 +1095,13 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
|
||||
css_adapter_interrupt(dev->thinint_isc);
|
||||
}
|
||||
} else {
|
||||
indicators = ldq_phys(&address_space_memory, dev->indicators->addr);
|
||||
indicators = address_space_ldq(&address_space_memory,
|
||||
dev->indicators->addr,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
indicators |= 1ULL << vector;
|
||||
stq_phys(&address_space_memory, dev->indicators->addr, indicators);
|
||||
address_space_stq(&address_space_memory, dev->indicators->addr,
|
||||
indicators, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
css_conditional_io_interrupt(sch);
|
||||
}
|
||||
} else {
|
||||
@ -1078,9 +1109,13 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
|
||||
return;
|
||||
}
|
||||
vector = 0;
|
||||
indicators = ldq_phys(&address_space_memory, dev->indicators2->addr);
|
||||
indicators = address_space_ldq(&address_space_memory,
|
||||
dev->indicators2->addr,
|
||||
MEMTXATTRS_UNSPECIFIED,
|
||||
NULL);
|
||||
indicators |= 1ULL << vector;
|
||||
stq_phys(&address_space_memory, dev->indicators2->addr, indicators);
|
||||
address_space_stq(&address_space_memory, dev->indicators2->addr,
|
||||
indicators, MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
css_conditional_io_interrupt(sch);
|
||||
}
|
||||
}
|
||||
|
@ -318,8 +318,10 @@ static void r2d_init(MachineState *machine)
|
||||
}
|
||||
|
||||
/* initialization which should be done by firmware */
|
||||
stl_phys(&address_space_memory, SH7750_BCR1, 1<<3); /* cs3 SDRAM */
|
||||
stw_phys(&address_space_memory, SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */
|
||||
address_space_stl(&address_space_memory, SH7750_BCR1, 1 << 3,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 SDRAM */
|
||||
address_space_stw(&address_space_memory, SH7750_BCR2, 3 << (3 * 2),
|
||||
MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 32bit */
|
||||
reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */
|
||||
}
|
||||
|
||||
|
@ -206,8 +206,9 @@ static void update_irq(struct HPETTimer *timer, int set)
            }
        }
    } else if (timer_fsb_route(timer)) {
        stl_le_phys(&address_space_memory,
                    timer->fsb >> 32, timer->fsb & 0xffffffff);
        address_space_stl_le(&address_space_memory, timer->fsb >> 32,
                             timer->fsb & 0xffffffff, MEMTXATTRS_UNSPECIFIED,
                             NULL);
    } else if (timer->config & HPET_TN_TYPE_LEVEL) {
        s->isr |= mask;
        /* fold the ICH PIRQ# pin's internal inversion logic into hpet */
@ -1531,9 +1531,12 @@ static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
            return 0;
        }

        io_mem_read(&vdev->pdev.msix_table_mmio,
                    (hwaddr)(quirk->data.address_match & 0xfff),
                    &val, size);
        memory_region_dispatch_read(&vdev->pdev.msix_table_mmio,
                                    (hwaddr)(quirk->data.address_match
                                             & 0xfff),
                                    &val,
                                    size,
                                    MEMTXATTRS_UNSPECIFIED);
        return val;
    }
}
@ -1561,9 +1564,12 @@ static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
                    memory_region_name(&quirk->mem),
                    vdev->vbasedev.name);

        io_mem_write(&vdev->pdev.msix_table_mmio,
                     (hwaddr)(quirk->data.address_match & 0xfff),
                     data, size);
        memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
                                     (hwaddr)(quirk->data.address_match
                                              & 0xfff),
                                     data,
                                     size,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    quirk->data.flags = 1;
@ -30,6 +30,7 @@
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
@ -102,12 +103,22 @@ typedef struct CPUTLBEntry {

QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));

/* The IOTLB is not accessed directly inline by generated TCG code,
 * so the CPUIOTLBEntry layout is not as critical as that of the
 * CPUTLBEntry. (This is also why we don't want to combine the two
 * structs into one.)
 */
typedef struct CPUIOTLBEntry {
    hwaddr addr;
    MemTxAttrs attrs;
} CPUIOTLBEntry;

#define CPU_COMMON_TLB \
    /* The meaning of the MMU modes is defined in the target code. */ \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
    CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
    hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
    hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \
    CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
    CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \
    target_ulong tlb_flush_addr; \
    target_ulong tlb_flush_mask; \
    target_ulong vtlb_index; \

@ -105,6 +105,9 @@ void tlb_flush(CPUState *cpu, int flush_global);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
@ -341,10 +344,6 @@ void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);
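A sketch (not from the patch) of how a target's MMU fault handler is expected to use the new entry point once translation succeeds; phys, attrs, prot and page_size are hypothetical locals filled in by the target's page table walker:

    tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
                            phys & TARGET_PAGE_MASK, attrs,
                            prot, mmu_idx, page_size);

The old tlb_set_page() declaration remains alongside it for callers that have no attribute information to pass.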
45 include/exec/memattrs.h (new file)
@ -0,0 +1,45 @@
/*
 * Memory transaction attributes
 *
 * Copyright (c) 2015 Linaro Limited.
 *
 * Authors:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMATTRS_H
#define MEMATTRS_H

/* Every memory transaction has associated with it a set of
 * attributes. Some of these are generic (such as the ID of
 * the bus master); some are specific to a particular kind of
 * bus (such as the ARM Secure/NonSecure bit). We define them
 * all as non-overlapping bitfields in a single struct to avoid
 * confusion if different parts of QEMU used the same bit for
 * different semantics.
 */
typedef struct MemTxAttrs {
    /* Bus masters which don't specify any attributes will get this
     * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
     * distinguish "all attributes deliberately clear" from
     * "didn't specify" if necessary.
     */
    unsigned int unspecified:1;
    /* ARM/AMBA TrustZone Secure access */
    unsigned int secure:1;
    /* Memory access is usermode (unprivileged) */
    unsigned int user:1;
} MemTxAttrs;

/* Bus masters which don't specify any attributes will get this,
 * which has all attribute bits clear except the topmost one
 * (so that we can distinguish "all attributes deliberately clear"
 * from "didn't specify" if necessary).
 */
#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })

#endif
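For orientation, a minimal sketch (not part of the patch) of how callers build attributes with this header:

    /* An explicitly secure access: all bits clear, then set what we mean.
     * Note this is distinct from MEMTXATTRS_UNSPECIFIED, which sets only
     * the 'unspecified' bit.
     */
    MemTxAttrs attrs = {};
    attrs.secure = 1;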
@ -28,6 +28,7 @@
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
@ -68,6 +69,16 @@ struct IOMMUTLBEntry {
    IOMMUAccessFlags perm;
};

/* New-style MMIO accessors can indicate that the transaction failed.
 * A zero (MEMTX_OK) response means success; anything else is a failure
 * of some kind. The memory subsystem will bitwise-OR together results
 * if it is synthesizing an operation from multiple smaller accesses.
 */
#define MEMTX_OK 0
#define MEMTX_ERROR             (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
typedef uint32_t MemTxResult;

/*
 * Memory region callbacks
 */
@ -84,6 +95,17 @@ struct MemoryRegionOps {
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
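A hedged sketch of a device using the new hooks. The device state, register bank and secure-only policy are invented for illustration; only the callback signatures and MEMTX_* values come from this patch:

    static MemTxResult mydev_read(void *opaque, hwaddr addr,
                                  uint64_t *data, unsigned size,
                                  MemTxAttrs attrs)
    {
        MyDevState *s = opaque;        /* hypothetical device state */

        if (!attrs.secure) {
            return MEMTX_ERROR;        /* reject non-secure accesses */
        }
        *data = s->regs[addr >> 2];    /* hypothetical register bank */
        return MEMTX_OK;
    }

    static MemTxResult mydev_write(void *opaque, hwaddr addr,
                                   uint64_t data, unsigned size,
                                   MemTxAttrs attrs)
    {
        MyDevState *s = opaque;

        if (!attrs.secure) {
            return MEMTX_ERROR;
        }
        s->regs[addr >> 2] = data;
        return MEMTX_OK;
    }

    static const MemoryRegionOps mydev_ops = {
        .read_with_attrs = mydev_read,
        .write_with_attrs = mydev_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    };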
@ -1030,6 +1052,37 @@ void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);
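Callers of the dispatch functions (compare the vfio quirk conversion earlier in this diff) now get a transaction status back; schematically, with mr and addr as placeholders:

    uint64_t val;
    MemTxResult r = memory_region_dispatch_read(mr, addr, &val, 4,
                                                MEMTXATTRS_UNSPECIFIED);
    if (r != MEMTX_OK) {
        /* MEMTX_DECODE_ERROR: nothing mapped there;
         * MEMTX_ERROR: the device rejected the access.
         */
    }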

/**
 * address_space_init: initializes an address space
 *
@ -1055,41 +1108,117 @@ void address_space_destroy(AddressSpace *as);
/**
 * address_space_rw: read from or write to an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @is_write: indicates the transfer direction
 */
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write);
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len);
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);

/**
 * address_space_read: read from an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len);

/**
 * address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);

#ifdef NEED_CPU_H
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result);
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result);
#endif
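Usage follows one shape for all of these accessors; a sketch with a placeholder address:

    MemTxResult result;
    uint32_t v = address_space_ldl_le(&address_space_memory, addr,
                                      MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        /* handle the fault; pass NULL instead of &result to ignore it,
         * as most of the conversions in this series do.
         */
    }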

/* address_space_translate: translate an address range into an address space
 * into a MemoryRegion and an address range into that section
@ -24,6 +24,7 @@
#include <setjmp.h>
#include "hw/qdev-core.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/tls.h"
@ -195,6 +196,7 @@ typedef struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
@ -88,7 +88,8 @@ static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    return address_space_rw(as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
    return (bool)address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                  buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
16 ioport.c
@ -64,7 +64,8 @@ void cpu_outb(pio_addr_t addr, uint8_t val)
{
    LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);
    trace_cpu_out(addr, val);
    address_space_write(&address_space_io, addr, &val, 1);
    address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED,
                        &val, 1);
}

void cpu_outw(pio_addr_t addr, uint16_t val)
@ -74,7 +75,8 @@ void cpu_outw(pio_addr_t addr, uint16_t val)
    LOG_IOPORT("outw: %04"FMT_pioaddr" %04"PRIx16"\n", addr, val);
    trace_cpu_out(addr, val);
    stw_p(buf, val);
    address_space_write(&address_space_io, addr, buf, 2);
    address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED,
                        buf, 2);
}

void cpu_outl(pio_addr_t addr, uint32_t val)
@ -84,14 +86,16 @@ void cpu_outl(pio_addr_t addr, uint32_t val)
    LOG_IOPORT("outl: %04"FMT_pioaddr" %08"PRIx32"\n", addr, val);
    trace_cpu_out(addr, val);
    stl_p(buf, val);
    address_space_write(&address_space_io, addr, buf, 4);
    address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED,
                        buf, 4);
}

uint8_t cpu_inb(pio_addr_t addr)
{
    uint8_t val;

    address_space_read(&address_space_io, addr, &val, 1);
    address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED,
                       &val, 1);
    trace_cpu_in(addr, val);
    LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);
    return val;
@ -102,7 +106,7 @@ uint16_t cpu_inw(pio_addr_t addr)
    uint8_t buf[2];
    uint16_t val;

    address_space_read(&address_space_io, addr, buf, 2);
    address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 2);
    val = lduw_p(buf);
    trace_cpu_in(addr, val);
    LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val);
@ -114,7 +118,7 @@ uint32_t cpu_inl(pio_addr_t addr)
    uint8_t buf[4];
    uint32_t val;

    address_space_read(&address_space_io, addr, buf, 4);
    address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 4);
    val = ldl_p(buf);
    trace_cpu_in(addr, val);
    LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val);
@ -1667,7 +1667,8 @@ static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, ptr, size,
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
206 memory.c
@ -368,26 +368,29 @@ static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
    }
}

static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask)
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static void memory_region_read_accessor(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

@ -397,28 +400,52 @@ static void memory_region_read_accessor(MemoryRegion *mr,
    tmp = mr->ops->read(mr->opaque, addr, size);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
                                                 unsigned shift,
                                                 uint64_t mask)
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static void memory_region_write_accessor(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

@ -428,24 +455,46 @@ static void memory_region_write_accessor(MemoryRegion *mr,
    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static void access_with_adjusted_size(hwaddr addr,
static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      MemoryRegion *mr)
                                      MemTxResult (*access)(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
@ -459,14 +508,16 @@ static void access_with_adjusted_size(hwaddr addr,
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size,
                   (size - access_size - i) * 8, access_mask);
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size, i * 8, access_mask);
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
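Note the r |= access(...) accumulation above: because MEMTX_OK is 0, a synthesized access reports success only if every sub-access succeeded, and the failure bits of the parts are merged. As a trivial illustration:

    MemTxResult r = MEMTX_OK;
    r |= MEMTX_OK;            /* first 4-byte half of an 8-byte access */
    r |= MEMTX_DECODE_ERROR;  /* second half missed: r is now nonzero */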
@ -1053,62 +1104,82 @@ bool memory_region_access_valid(MemoryRegion *mr,
    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             hwaddr addr,
                                             unsigned size)
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    uint64_t data = 0;
    *pval = 0;

    if (mr->ops->read) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_read_accessor, mr);
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_read_accessor, mr);
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }

    return data;
}

static bool memory_region_dispatch_read(MemoryRegion *mr,
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size)
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return true;
        return MEMTX_DECODE_ERROR;
    }

    *pval = memory_region_dispatch_read1(mr, addr, size);
    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return false;
    return r;
}

static bool memory_region_dispatch_write(MemoryRegion *mr,
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size)
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return true;
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_write_accessor, mr);
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_write_accessor, mr);
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
    return false;
}

void memory_region_init_io(MemoryRegion *mr,
@ -1992,17 +2063,6 @@ void address_space_destroy(AddressSpace *as)
    call_rcu(as, do_address_space_destroy, rcu);
}

bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size)
{
    return memory_region_dispatch_read(mr, addr, pval, size);
}

bool io_mem_write(MemoryRegion *mr, hwaddr addr,
                  uint64_t val, unsigned size)
{
    return memory_region_dispatch_write(mr, addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
@ -1384,7 +1384,8 @@ static void hmp_sum(Monitor *mon, const QDict *qdict)
|
||||
|
||||
sum = 0;
|
||||
for(addr = start; addr < (start + size); addr++) {
|
||||
uint8_t val = ldub_phys(&address_space_memory, addr);
|
||||
uint8_t val = address_space_ldub(&address_space_memory, addr,
|
||||
MEMTXATTRS_UNSPECIFIED, NULL);
|
||||
/* BSD sum algorithm ('sum' Unix command) */
|
||||
sum = (sum >> 1) | (sum << 15);
|
||||
sum += val;
|
||||
|
@ -46,6 +46,8 @@ typedef struct va_list_str *va_list;

typedef struct AddressSpace AddressSpace;
typedef uint64_t hwaddr;
typedef uint32_t MemTxResult;
typedef uint64_t MemTxAttrs;

static void __write(uint8_t *buf, ssize_t len)
{
@ -65,10 +67,10 @@ static void __read(uint8_t *buf, ssize_t len)
    int last = buf[len-1];
}

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    bool result;
    MemTxResult result;

    // TODO: investigate impact of treating reads as producing
    // tainted data, with __coverity_tainted_data_argument__(buf).
@ -123,7 +123,7 @@
     * victim tlb. try to refill from the victim tlb before walking the \
     * page table. */ \
    int vidx; \
    hwaddr tmpiotlb; \
    CPUIOTLBEntry tmpiotlb; \
    CPUTLBEntry tmptlb; \
    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
@ -143,12 +143,13 @@

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
@ -158,7 +159,8 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
    }

    cpu->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif
@ -195,15 +197,15 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }
@ -283,15 +285,15 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }
@ -363,12 +365,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
@ -378,7 +381,8 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
@ -408,16 +412,16 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }
@ -489,16 +493,16 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }
@ -111,7 +111,7 @@ static void arm_cpu_reset(CPUState *s)
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* and to the FP/Neon instructions */
        env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
@ -126,7 +126,7 @@ static void arm_cpu_reset(CPUState *s)
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 4, 0xf);
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }

@ -524,9 +524,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
        unset_feature(env, ARM_FEATURE_EL3);

        /* Disable the security extension feature bits in the processor feature
         * register as well. This is id_pfr1[7:4].
         * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
         */
        cpu->id_pfr1 &= ~0xf0;
        cpu->id_aa64pfr0 &= ~0xf000;
    }

    register_cp_regs_for_features(cpu);
@ -201,7 +201,7 @@ typedef struct CPUARMState {
            };
            uint64_t sctlr_el[4];
        };
        uint64_t c1_coproc; /* Coprocessor access register. */
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
@ -1813,7 +1813,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
        int fpen;

        if (arm_feature(env, ARM_FEATURE_V6)) {
            fpen = extract32(env->cp15.c1_coproc, 20, 2);
            fpen = extract32(env->cp15.cpacr_el1, 20, 2);
        } else {
            /* CPACR doesn't exist before v6, so VFP is always accessible */
            fpen = 3;
@ -14,7 +14,7 @@
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
                                int access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, int *prot,
                                hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                                target_ulong *page_size);

/* Definitions for the PMCCNTR and PMCR registers */
@ -589,7 +589,7 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
        }
        value &= mask;
    }
    env->cp15.c1_coproc = value;
    env->cp15.cpacr_el1 = value;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
@ -615,7 +615,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
@ -816,8 +816,10 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
@ -1466,9 +1468,10 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
    int prot;
    int ret;
    uint64_t par64;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx,
                        &phys_addr, &prot, &page_size);
                        &phys_addr, &attrs, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
@ -1477,6 +1480,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
        par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
@ -1499,6 +1505,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            par64 = ((ret & (1 << 10)) >> 5) | ((ret & (1 << 12)) >> 6) |
                    ((ret & 0xf) << 1) | 1;
@ -4858,6 +4867,26 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_S1E2:
    case ARMMMUIdx_S2NS:
        return false;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return true;
    default:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
@ -5102,6 +5131,29 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
    return true;
}

/* All loads done in the course of a page table walk go through here.
 * TODO: rather than ignoring errors from physical memory reads (which
 * are external aborts in ARM terminology) we should propagate this
 * error out so that we can turn it into a Data Abort if this walk
 * was being done for a CPU load/store or an address translation instruction
 * (but not if it was for a debug access).
 */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure)
{
    MemTxAttrs attrs = {};

    attrs.secure = is_secure;
    return address_space_ldl(cs->as, addr, attrs, NULL);
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure)
{
    MemTxAttrs attrs = {};

    attrs.secure = is_secure;
    return address_space_ldq(cs->as, addr, attrs, NULL);
}

static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
@ -5124,7 +5176,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
        code = 5;
        goto do_fault;
    }
    desc = ldl_phys(cs->as, table);
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
@ -5160,7 +5212,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(cs->as, table);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
@ -5210,6 +5262,7 @@ do_fault:

static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
                            MemTxAttrs *attrs,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
@ -5224,6 +5277,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
@ -5232,7 +5286,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
        code = 5;
        goto do_fault;
    }
    desc = ldl_phys(cs->as, table);
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
@ -5273,13 +5327,15 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(cs->as, table);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
@ -5330,6 +5386,13 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
@ -5347,7 +5410,7 @@ typedef enum {

static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                              int access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, int *prot,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                              target_ulong *page_size_ptr)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
@ -5487,13 +5550,20 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);

    tableattrs = 0;
    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
        descaddr &= ~7ULL;
        descriptor = ldq_phys(cs->as, descaddr);
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
@ -5528,7 +5598,7 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= extract32(tableattrs, 4, 1) << 3; /* NS */
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
@ -5552,6 +5622,13 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;
@ -5635,8 +5712,8 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * Returns 0 if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
@ -5649,24 +5726,33 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
                                int access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, int *prot,
                                hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                                target_ulong *page_size)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* TODO: when we support EL2 we should here call ourselves recursively
         * to do the stage 1 and then stage 2 translations. The ldl_phys
         * calls for stage 1 will also need changing.
         * to do the stage 1 and then stage 2 translations. The arm_ld*_ptw
         * functions will also need changing to perform ARMMMUIdx_S2NS loads
         * rather than direct physical memory loads when appropriate.
         * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
         */
        assert(!arm_feature(env, ARM_FEATURE_EL2));
        mmu_idx += ARMMMUIdx_S1NSE0;
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
@ -5695,10 +5781,10 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
                                  prot, page_size);
                                  attrs, prot, page_size);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
                                prot, page_size);
                                attrs, prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
                                prot, page_size);
@ -5716,14 +5802,16 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
    int ret;
    uint32_t syn;
    bool same_el = (arm_current_el(env) != 0);
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr, &prot,
                        &page_size);
    ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
                        &attrs, &prot, &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

@ -5758,9 +5846,10 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
                        &prot, &page_size);
                        &attrs, &prot, &page_size);

    if (ret != 0) {
        return -1;
@ -600,15 +600,26 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* TODO: check against CPU security state when we implement TrustZone */
    bool is_secure = false;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        if (!env->cpu_watchpoint[n]
            || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

@ -649,15 +660,7 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
        break;
    }

    /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
     * "unprivileged access" instructions should match watchpoints as if
     * they were accesses done at EL0, even if the CPU is at EL1 or higher.
     * Implementing this would require reworking the core watchpoint code
     * to plumb the mmu_idx through to this point. Luckily Linux does not
     * rely on this behaviour currently.
     * For breakpoints we do want to use the current CPU state.
     */
    switch (arm_current_el(env)) {
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
@ -27,7 +27,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = ldq_phys(as, pte_addr);
        pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
@ -57,7 +57,7 @@ static void walk_pte2(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = ldl_phys(as, pte_addr);
        pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
@ -89,7 +89,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = ldq_phys(as, pde_addr);
        pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
@ -126,7 +126,7 @@ static void walk_pde2(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = ldl_phys(as, pde_addr);
        pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
@ -167,7 +167,7 @@ static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(as, pdpe_addr);
        pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
@ -192,7 +192,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(as, pdpe_addr);
        pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
@ -228,7 +228,8 @@ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = ldq_phys(as, pml4e_addr);
        pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
                                  NULL);
        if (!(pml4e & PG_PRESENT_MASK)) {
            /* not present */
            continue;