exec: move rcu_read_lock/unlock to address_space_translate callers

Once address_space_translate is called outside the BQL, the returned
MemoryRegion might disappear as soon as the RCU read-side critical section
ends.  Avoid this by moving the critical section to the callers.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426684909-95030-3-git-send-email-pbonzini@redhat.com>
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2015-03-18 14:21:43 +01:00
Commit: 41063e1e7a (parent: 4c66375252)
4 changed files with 40 additions and 7 deletions
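
For reference, the caller-side pattern this patch establishes looks roughly like the sketch below (modeled on the cpu_physical_memory_is_io() hunk further down; the helper name phys_addr_is_io() is hypothetical, while the QEMU APIs are the ones touched by the patch). The point is that the RCU read-side critical section now covers both the translation and every use of the returned MemoryRegion.

#include "qemu/rcu.h"        /* rcu_read_lock(), rcu_read_unlock() */
#include "exec/memory.h"     /* address_space_translate(), MemoryRegion */

/* Hypothetical helper mirroring cpu_physical_memory_is_io(): the caller
 * takes the RCU read lock itself and only drops it once it is done with
 * the MemoryRegion returned by address_space_translate(). */
static bool phys_addr_is_io(AddressSpace *as, hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(as, phys_addr, &phys_addr, &l, false);
    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();   /* mr must not be dereferenced past this point */
    return res;
}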

exec.c

@@ -373,6 +373,7 @@ static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
return false;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
@@ -381,7 +382,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
MemoryRegionSection *section;
MemoryRegion *mr;
rcu_read_lock();
for (;;) {
AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
section = address_space_translate_internal(d, addr, &addr, plen, true);
@@ -409,7 +409,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
}
*xlat = addr;
rcu_read_unlock();
return mr;
}
@@ -2329,6 +2328,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, is_write);
@@ -2415,6 +2415,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
buf += l;
addr += l;
}
rcu_read_unlock();
return result;
}
@@ -2452,6 +2453,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
hwaddr addr1;
MemoryRegion *mr;
rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true);
@@ -2477,6 +2479,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
buf += l;
addr += l;
}
rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
@@ -2585,6 +2588,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
MemoryRegion *mr;
hwaddr l, xlat;
rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &xlat, &l, is_write);
@@ -2598,6 +2602,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
len -= l;
addr += l;
}
rcu_read_unlock();
return true;
}
@@ -2624,9 +2629,12 @@ void *address_space_map(AddressSpace *as,
}
l = len;
rcu_read_lock();
mr = address_space_translate(as, addr, &xlat, &l, is_write);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
rcu_read_unlock();
return NULL;
}
/* Avoid unbounded allocations */
@@ -2642,6 +2650,7 @@ void *address_space_map(AddressSpace *as,
bounce.buffer, l);
}
rcu_read_unlock();
*plen = l;
return bounce.buffer;
}
@@ -2665,6 +2674,7 @@ void *address_space_map(AddressSpace *as,
}
memory_region_ref(mr);
rcu_read_unlock();
*plen = done;
return qemu_ram_ptr_length(raddr + base, plen);
}
@@ -2728,6 +2738,7 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, false);
if (l < 4 || !memory_access_is_direct(mr, false)) {
/* I/O case */
@@ -2762,6 +2773,7 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
if (result) {
*result = r;
}
rcu_read_unlock();
return val;
}
@@ -2814,6 +2826,7 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
@@ -2849,6 +2862,7 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
if (result) {
*result = r;
}
rcu_read_unlock();
return val;
}
@@ -2921,6 +2935,7 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 2 || !memory_access_is_direct(mr, false)) {
@@ -2956,6 +2971,7 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
if (result) {
*result = r;
}
rcu_read_unlock();
return val;
}
@@ -3007,6 +3023,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -3029,6 +3046,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
if (result) {
*result = r;
}
rcu_read_unlock();
}
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
@@ -3049,6 +3067,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -3083,6 +3102,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
if (result) {
*result = r;
}
rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
@@ -3152,6 +3172,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
hwaddr addr1;
MemTxResult r;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
@@ -3185,6 +3206,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
if (result) {
*result = r;
}
rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
@@ -3322,12 +3344,15 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
MemoryRegion*mr;
hwaddr l = 1;
bool res;
rcu_read_lock();
mr = address_space_translate(&address_space_memory,
phys_addr, &phys_addr, &l, false);
return !(memory_region_is_ram(mr) ||
memory_region_is_romd(mr));
res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
rcu_read_unlock();
return res;
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)

hw/vfio/common.c

@@ -270,13 +270,14 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
* this IOMMU to its immediate target. We need to translate
* it the rest of the way through to memory.
*/
rcu_read_lock();
mr = address_space_translate(&address_space_memory,
iotlb->translated_addr,
&xlat, &len, iotlb->perm & IOMMU_WO);
if (!memory_region_is_ram(mr)) {
error_report("iommu map to non memory area %"HWADDR_PRIx"",
xlat);
return;
goto out;
}
/*
* Translation truncates length to the IOMMU page size,
@@ -284,7 +285,7 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
*/
if (len & iotlb->addr_mask) {
error_report("iommu has granularity incompatible with target AS");
return;
goto out;
}
if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
@@ -307,6 +308,8 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
iotlb->addr_mask + 1, ret);
}
}
out:
rcu_read_unlock();
}
static void vfio_listener_region_add(MemoryListener *listener,
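
The vfio_iommu_map_notify() hunk above also shows the companion idiom for functions with early error returns: route every exit through one label so rcu_read_unlock() always runs. A minimal sketch of that shape, with a hypothetical translate_for_map() helper standing in for the VFIO-specific work:

#include "qemu/rcu.h"
#include "qemu/error-report.h"   /* error_report() */
#include "exec/memory.h"

/* Hypothetical helper: early failures jump to "out" instead of returning,
 * so the RCU read-side critical section is always closed. */
static void translate_for_map(AddressSpace *as, hwaddr addr, hwaddr mask,
                              bool is_write)
{
    MemoryRegion *mr;
    hwaddr xlat, len = mask + 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &len, is_write);
    if (!memory_region_is_ram(mr)) {
        error_report("map to non memory area %"HWADDR_PRIx, xlat);
        goto out;
    }
    if (len & mask) {
        error_report("granularity incompatible with target AS");
        goto out;
    }
    /* ... perform the actual DMA map with (xlat, len) here ... */
out:
    rcu_read_unlock();
}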

include/exec/memory.h

@@ -1233,7 +1233,9 @@ void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
#endif
/* address_space_translate: translate an address range into an address space
* into a MemoryRegion and an address range into that section
* into a MemoryRegion and an address range into that section. Should be
* called from an RCU critical section, to avoid that the last reference
* to the returned region disappears after address_space_translate returns.
*
* @as: #AddressSpace to be accessed
* @addr: address within that address space
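
The new comment documents the contract; the address_space_map() hunk in exec.c shows the escape hatch for callers that need the region after the critical section ends: take a reference while still inside it. A small sketch of that idiom, with a hypothetical translate_and_ref() wrapper:

#include "qemu/rcu.h"
#include "exec/memory.h"

/* Hypothetical wrapper: pin the MemoryRegion with a reference before the
 * RCU read-side critical section ends, as address_space_map() now does.
 * The caller is responsible for the matching memory_region_unref(). */
static MemoryRegion *translate_and_ref(AddressSpace *as, hwaddr addr,
                                       hwaddr *xlat, hwaddr *plen,
                                       bool is_write)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = address_space_translate(as, addr, xlat, plen, is_write);
    memory_region_ref(mr);
    rcu_read_unlock();
    return mr;
}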

translate-all.c

@@ -1416,14 +1416,17 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
MemoryRegion *mr;
hwaddr l = 1;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr, &l, false);
if (!(memory_region_is_ram(mr)
|| memory_region_is_romd(mr))) {
rcu_read_unlock();
return;
}
ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
+ addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */