rcu: Use automatic rcu_read unlock in core memory/exec code
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20191007143642.301445-6-dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
parent 987ab2a549
commit 694ea274d9
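The conversion relies on QEMU's RCU_READ_LOCK_GUARD() macro, which takes the RCU read lock and drops it automatically when the enclosing scope is left, so early-return paths no longer need a matching rcu_read_unlock(). Below is a minimal, self-contained sketch of the idea built on the GCC/Clang cleanup attribute; the names rcu_guard_t, rcu_guard_lock() and RCU_GUARD() are illustrative only, not QEMU's actual definitions (those live in include/qemu/rcu.h).

    #include <stdio.h>

    /* Stubs standing in for the real RCU primitives. */
    static void rcu_read_lock(void)   { puts("rcu_read_lock");   }
    static void rcu_read_unlock(void) { puts("rcu_read_unlock"); }

    typedef int rcu_guard_t;

    static inline rcu_guard_t rcu_guard_lock(void)
    {
        rcu_read_lock();
        return 0;
    }

    static inline void rcu_guard_drop(rcu_guard_t *g)
    {
        (void)g;
        rcu_read_unlock();
    }

    /* Declares a local whose cleanup handler releases the read lock when
     * the enclosing scope is left, whatever the exit path. */
    #define RCU_GUARD() \
        __attribute__((cleanup(rcu_guard_drop), unused)) \
        rcu_guard_t rcu_guard_ = rcu_guard_lock()

    static int lookup(int key)
    {
        RCU_GUARD();
        if (key < 0) {
            return -1;      /* unlock happens automatically here too */
        }
        return key * 2;
    }

    int main(void)
    {
        printf("%d\n", lookup(21));
        return 0;
    }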
 exec.c | 116
@@ -1037,16 +1037,14 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
         return;
     }

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     mr = address_space_translate(as, addr, &addr, &l, false, attrs);
     if (!(memory_region_is_ram(mr)
           || memory_region_is_romd(mr))) {
-        rcu_read_unlock();
         return;
     }
     ram_addr = memory_region_get_ram_addr(mr) + addr;
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
-    rcu_read_unlock();
 }

 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
@@ -1332,14 +1330,13 @@ static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
     end = TARGET_PAGE_ALIGN(start + length);
     start &= TARGET_PAGE_MASK;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     block = qemu_get_ram_block(start);
     assert(block == qemu_get_ram_block(end - 1));
     start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
     CPU_FOREACH(cpu) {
         tlb_reset_dirty(cpu, start1, length);
     }
-    rcu_read_unlock();
 }

 /* Note: start and end must be within the same ram block. */
@@ -1360,30 +1357,29 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;

-    rcu_read_lock();
-
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
-    ramblock = qemu_get_ram_block(start);
-    /* Range sanity check on the ramblock */
-    assert(start >= ramblock->offset &&
-           start + length <= ramblock->offset + ramblock->used_length);
-
-    while (page < end) {
-        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
-
-        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
-                                              offset, num);
-        page += num;
-    }
-
-    mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
-    mr_size = (end - page) << TARGET_PAGE_BITS;
-    memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
-
-    rcu_read_unlock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        ramblock = qemu_get_ram_block(start);
+        /* Range sanity check on the ramblock */
+        assert(start >= ramblock->offset &&
+               start + length <= ramblock->offset + ramblock->used_length);
+
+        while (page < end) {
+            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long num = MIN(end - page,
+                                    DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+                                                  offset, num);
+            page += num;
+        }
+
+        mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
+        mr_size = (end - page) << TARGET_PAGE_BITS;
+        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
+    }

     if (dirty && tcg_enabled()) {
         tlb_reset_dirty_range_all(start, length);
     }
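Where the critical section covers only part of a function, as in the hunk above, the patch instead uses WITH_RCU_READ_LOCK_GUARD() { ... } so the lock is scoped to an explicit block. The sketch below shows the general shape with a plain for-loop wrapper; the macro name WITH_RCU_GUARD() and the stub lock functions are hypothetical. QEMU's real macro also attaches an automatic-cleanup variable, so it unlocks even on an early return or break out of the block, which this simplified version does not handle.

    #include <stdbool.h>
    #include <stdio.h>

    static void rcu_read_lock(void)   { puts("rcu_read_lock");   }
    static void rcu_read_unlock(void) { puts("rcu_read_unlock"); }

    /* Run the attached block exactly once with the read lock held;
     * the lock is dropped when the block falls off its end. */
    #define WITH_RCU_GUARD() \
        for (bool once_ = (rcu_read_lock(), true); once_; \
             rcu_read_unlock(), once_ = false)

    int main(void)
    {
        WITH_RCU_GUARD() {
            puts("reader critical section");
        }
        return 0;
    }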
@@ -1411,28 +1407,27 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     end = last >> TARGET_PAGE_BITS;
     dest = 0;

-    rcu_read_lock();
-
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
-
-    while (page < end) {
-        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
-
-        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
-        assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
-        offset >>= BITS_PER_LEVEL;
-
-        bitmap_copy_and_clear_atomic(snap->dirty + dest,
-                                     blocks->blocks[idx] + offset,
-                                     num);
-        page += num;
-        dest += num >> BITS_PER_LEVEL;
-    }
-
-    rcu_read_unlock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+        while (page < end) {
+            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long num = MIN(end - page,
+                                    DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+            assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
+            assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
+            offset >>= BITS_PER_LEVEL;
+
+            bitmap_copy_and_clear_atomic(snap->dirty + dest,
+                                         blocks->blocks[idx] + offset,
+                                         num);
+            page += num;
+            dest += num >> BITS_PER_LEVEL;
+        }
+    }

     if (tcg_enabled()) {
         tlb_reset_dirty_range_all(start, length);
     }
@@ -1643,7 +1638,7 @@ void ram_block_dump(Monitor *mon)
     RAMBlock *block;
     char *psize;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
                    "Block Name", "PSize", "Offset", "Used", "Total");
     RAMBLOCK_FOREACH(block) {
@@ -1655,7 +1650,6 @@ void ram_block_dump(Monitor *mon)
                        (uint64_t)block->max_length);
         g_free(psize);
     }
-    rcu_read_unlock();
 }

 #ifdef __linux__
@@ -2009,11 +2003,10 @@ static unsigned long last_ram_page(void)
     RAMBlock *block;
     ram_addr_t last = 0;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     RAMBLOCK_FOREACH(block) {
         last = MAX(last, block->offset + block->max_length);
     }
-    rcu_read_unlock();
     return last >> TARGET_PAGE_BITS;
 }

@@ -2100,7 +2093,7 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
     }
     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     RAMBLOCK_FOREACH(block) {
         if (block != new_block &&
             !strcmp(block->idstr, new_block->idstr)) {
@@ -2109,7 +2102,6 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
             abort();
         }
     }
-    rcu_read_unlock();
 }

 /* Called with iothread lock held. */
@@ -2651,17 +2643,16 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,

     if (xen_enabled()) {
         ram_addr_t ram_addr;
-        rcu_read_lock();
+        RCU_READ_LOCK_GUARD();
         ram_addr = xen_ram_addr_from_mapcache(ptr);
         block = qemu_get_ram_block(ram_addr);
         if (block) {
             *offset = ram_addr - block->offset;
         }
-        rcu_read_unlock();
         return block;
     }

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     block = atomic_rcu_read(&ram_list.mru_block);
     if (block && block->host && host - block->host < block->max_length) {
         goto found;
@@ -2677,7 +2668,6 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
         }
     }

-    rcu_read_unlock();
     return NULL;

 found:
@@ -2685,7 +2675,6 @@ found:
     if (round_offset) {
         *offset &= TARGET_PAGE_MASK;
     }
-    rcu_read_unlock();
     return block;
 }

@@ -3281,10 +3270,9 @@ MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
     FlatView *fv;

     if (len > 0) {
-        rcu_read_lock();
+        RCU_READ_LOCK_GUARD();
         fv = address_space_to_flatview(as);
         result = flatview_read(fv, addr, attrs, buf, len);
-        rcu_read_unlock();
     }

     return result;
@@ -3298,10 +3286,9 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
     FlatView *fv;

     if (len > 0) {
-        rcu_read_lock();
+        RCU_READ_LOCK_GUARD();
         fv = address_space_to_flatview(as);
         result = flatview_write(fv, addr, attrs, buf, len);
-        rcu_read_unlock();
     }

     return result;
@@ -3341,7 +3328,7 @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
     hwaddr addr1;
     MemoryRegion *mr;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     while (len > 0) {
         l = len;
         mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
@@ -3366,7 +3353,6 @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
         buf += l;
         addr += l;
     }
-    rcu_read_unlock();
     return MEMTX_OK;
 }

@@ -3511,10 +3497,9 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,
     FlatView *fv;
     bool result;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     fv = address_space_to_flatview(as);
     result = flatview_access_valid(fv, addr, len, is_write, attrs);
-    rcu_read_unlock();
     return result;
 }

@@ -3569,13 +3554,12 @@ void *address_space_map(AddressSpace *as,
     }

     l = len;
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     fv = address_space_to_flatview(as);
     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);

     if (!memory_access_is_direct(mr, is_write)) {
         if (atomic_xchg(&bounce.in_use, true)) {
-            rcu_read_unlock();
             return NULL;
         }
         /* Avoid unbounded allocations */
@@ -3591,7 +3575,6 @@ void *address_space_map(AddressSpace *as,
                                bounce.buffer, l);
         }

-        rcu_read_unlock();
         *plen = l;
         return bounce.buffer;
     }
@@ -3601,7 +3584,6 @@ void *address_space_map(AddressSpace *as,
     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                         l, is_write, attrs);
     ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
-    rcu_read_unlock();

     return ptr;
 }
@@ -3869,13 +3851,12 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
     hwaddr l = 1;
     bool res;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     mr = address_space_translate(&address_space_memory,
                                  phys_addr, &phys_addr, &l, false,
                                  MEMTXATTRS_UNSPECIFIED);

     res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
-    rcu_read_unlock();
     return res;
 }

@@ -3884,14 +3865,13 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     RAMBlock *block;
     int ret = 0;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     RAMBLOCK_FOREACH(block) {
         ret = func(block, opaque);
         if (ret) {
             break;
         }
     }
-    rcu_read_unlock();
     return ret;
 }

include/exec/ram_addr.h

@@ -193,30 +193,29 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;

-    rcu_read_lock();
-
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
-
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-        unsigned long num = next - base;
-        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
-        if (found < num) {
-            dirty = true;
-            break;
-        }
-
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
-    }
-
-    rcu_read_unlock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            unsigned long num = next - base;
+            unsigned long found = find_next_bit(blocks->blocks[idx],
+                                                num, offset);
+            if (found < num) {
+                dirty = true;
+                break;
+            }
+
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
+        }
+    }

     return dirty;
 }

@@ -234,7 +233,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();

     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

@@ -256,8 +255,6 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
         base += DIRTY_MEMORY_BLOCK_SIZE;
     }

-    rcu_read_unlock();
-
     return dirty;
 }

@@ -309,13 +306,11 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
     idx = page / DIRTY_MEMORY_BLOCK_SIZE;
     offset = page % DIRTY_MEMORY_BLOCK_SIZE;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();

     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

     set_bit_atomic(offset, blocks->blocks[idx]);
-
-    rcu_read_unlock();
 }

 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -334,39 +329,37 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;

-    rcu_read_lock();
-
-    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
-    }
-
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-
-        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
-                              offset, next - page);
-        }
-
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
-    }
-
-    rcu_read_unlock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        }
+
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+
+            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+                                  offset, next - page);
+            }
+
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
+        }
+    }

     xen_hvm_modified_memory(start, length);
 }

@@ -396,36 +389,35 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                           DIRTY_MEMORY_BLOCK_SIZE);

-        rcu_read_lock();
-
-        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
-        }
-
-        for (k = 0; k < nr; k++) {
-            if (bitmap[k]) {
-                unsigned long temp = leul_to_cpu(bitmap[k]);
-
-                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
-
-                if (global_dirty_log) {
-                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
-                              temp);
-                }
-
-                if (tcg_enabled()) {
-                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
-                }
-            }
-
-            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
-                offset = 0;
-                idx++;
-            }
-        }
-
-        rcu_read_unlock();
+        WITH_RCU_READ_LOCK_GUARD() {
+            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+                blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+            }
+
+            for (k = 0; k < nr; k++) {
+                if (bitmap[k]) {
+                    unsigned long temp = leul_to_cpu(bitmap[k]);
+
+                    atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+                    if (global_dirty_log) {
+                        atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+                                  temp);
+                    }
+
+                    if (tcg_enabled()) {
+                        atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+                                  temp);
+                    }
+                }
+
+                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                    offset = 0;
+                    idx++;
+                }
+            }
+        }

         xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
     } else {
         uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
 memory.c | 15
@@ -779,14 +779,13 @@ FlatView *address_space_get_flatview(AddressSpace *as)
 {
     FlatView *view;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     do {
         view = address_space_to_flatview(as);
         /* If somebody has replaced as->current_map concurrently,
          * flatview_ref returns false.
          */
     } while (!flatview_ref(view));
-    rcu_read_unlock();
     return view;
 }

@@ -2166,12 +2165,11 @@ int memory_region_get_fd(MemoryRegion *mr)
 {
     int fd;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     while (mr->alias) {
         mr = mr->alias;
     }
     fd = mr->ram_block->fd;
-    rcu_read_unlock();

     return fd;
 }
@@ -2181,14 +2179,13 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr)
     void *ptr;
     uint64_t offset = 0;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     while (mr->alias) {
         offset += mr->alias_offset;
         mr = mr->alias;
     }
     assert(mr->ram_block);
     ptr = qemu_map_ram_ptr(mr->ram_block, offset);
-    rcu_read_unlock();

     return ptr;
 }
@@ -2578,12 +2575,11 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                        hwaddr addr, uint64_t size)
 {
     MemoryRegionSection ret;
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     ret = memory_region_find_rcu(mr, addr, size);
     if (ret.mr) {
         memory_region_ref(ret.mr);
     }
-    rcu_read_unlock();
     return ret;
 }

@@ -2591,9 +2587,8 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
 {
     MemoryRegion *mr;

-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     mr = memory_region_find_rcu(container, addr, 1).mr;
-    rcu_read_unlock();
     return mr && mr != container;
 }
