accel/tcg: Use interval tree for TARGET_PAGE_DATA_SIZE
Continue weaning user-only away from PageDesc.

Use an interval tree to record target data.
Chunk the data, to minimize allocation overhead.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent a97d5d2c8b
commit f88f3ac90f
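Note on the chunking arithmetic: the patch comment below cites roughly 40
bytes of per-allocation overhead (IntervalTreeNode plus allocator
bookkeeping) against 128 bytes of per-page target data, the ARM MTE case.
A standalone sketch of the amortization, not part of the patch, using the
figures cited there:

    #include <stdio.h>

    int main(void)
    {
        const double node_overhead = 40.0;  /* node + allocator bookkeeping */
        const double page_data = 128.0;     /* TARGET_PAGE_DATA_SIZE, MTE */
        const int tpd_pages = 64;           /* TPD_PAGES in the patch */

        printf("one node per page:     %.1f%%\n",
               100.0 * node_overhead / page_data);
        printf("one node per %d pages: %.2f%%\n", tpd_pages,
               100.0 * node_overhead / (tpd_pages * page_data));
        return 0;
    }

Per page this works out to about 31% (the comment calls 40/128 roughly 40%);
per 64-page node it drops to about 0.5%, under the 1% the comment targets.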
@@ -26,7 +26,6 @@
 typedef struct PageDesc {
 #ifdef CONFIG_USER_ONLY
     unsigned long flags;
-    void *target_data;
 #else
     QemuSpin lock;
     /* list of TBs intersecting this ram page */
@@ -210,47 +210,96 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
     return addr;
 }

+#ifdef TARGET_PAGE_DATA_SIZE
+/*
+ * Allocate chunks of target data together. For the only current user,
+ * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
+ * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
+ */
+#define TPD_PAGES  64
+#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)
+
+typedef struct TargetPageDataNode {
+    IntervalTreeNode itree;
+    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
+} TargetPageDataNode;
+
+static IntervalTreeRoot targetdata_root;
+
 void page_reset_target_data(target_ulong start, target_ulong end)
 {
-#ifdef TARGET_PAGE_DATA_SIZE
-    target_ulong addr, len;
+    IntervalTreeNode *n, *next;
+    target_ulong last;

-    /*
-     * This function should never be called with addresses outside the
-     * guest address space. If this assert fires, it probably indicates
-     * a missing call to h2g_valid.
-     */
-    assert(end - 1 <= GUEST_ADDR_MAX);
-    assert(start < end);
     assert_memory_lock();

     start = start & TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
+    last = TARGET_PAGE_ALIGN(end) - 1;

-    for (addr = start, len = end - start;
-         len != 0;
-         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
-        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
+    for (n = interval_tree_iter_first(&targetdata_root, start, last),
+         next = n ? interval_tree_iter_next(n, start, last) : NULL;
+         n != NULL;
+         n = next,
+         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
+        target_ulong n_start, n_last, p_ofs, p_len;
+        TargetPageDataNode *t;

-        g_free(p->target_data);
-        p->target_data = NULL;
+        if (n->start >= start && n->last <= last) {
+            interval_tree_remove(n, &targetdata_root);
+            g_free(n);
+            continue;
+        }
+
+        if (n->start < start) {
+            n_start = start;
+            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
+        } else {
+            n_start = n->start;
+            p_ofs = 0;
+        }
+        n_last = MIN(last, n->last);
+        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
+
+        t = container_of(n, TargetPageDataNode, itree);
+        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
     }
-#endif
 }

-#ifdef TARGET_PAGE_DATA_SIZE
 void *page_get_target_data(target_ulong address)
 {
-    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
-    void *ret = p->target_data;
+    IntervalTreeNode *n;
+    TargetPageDataNode *t;
+    target_ulong page, region;

-    if (!ret) {
-        ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
-        p->target_data = ret;
+    page = address & TARGET_PAGE_MASK;
+    region = address & TBD_MASK;
+
+    n = interval_tree_iter_first(&targetdata_root, page, page);
+    if (!n) {
+        /*
+         * See util/interval-tree.c re lockless lookups: no false positives
+         * but there are false negatives. If we find nothing, retry with
+         * the mmap lock acquired. We also need the lock for the
+         * allocation + insert.
+         */
+        mmap_lock();
+        n = interval_tree_iter_first(&targetdata_root, page, page);
+        if (!n) {
+            t = g_new0(TargetPageDataNode, 1);
+            n = &t->itree;
+            n->start = region;
+            n->last = region | ~TBD_MASK;
+            interval_tree_insert(n, &targetdata_root);
+        }
+        mmap_unlock();
     }
-    return ret;
+
+    t = container_of(n, TargetPageDataNode, itree);
+    return t->data[(page - region) >> TARGET_PAGE_BITS];
 }
+#else
+void page_reset_target_data(target_ulong start, target_ulong end) { }
+#endif /* TARGET_PAGE_DATA_SIZE */

 /* The softmmu versions of these helpers are in cputlb.c. */

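The region arithmetic above packs 64 pages of data per tree node: TBD_MASK
(TARGET_PAGE_MASK * TPD_PAGES) masks an address down to its 64-page region,
region | ~TBD_MASK is that region's last byte, and (page - region) >>
TARGET_PAGE_BITS selects the page's slot in the node's data array. A
self-contained sketch with illustrative constants (a 4 KiB page; PAGE_* and
TBD_MASK here are stand-ins, not the QEMU macros):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define PAGE_BITS 12
    #define PAGE_MASK (~(uint64_t)((1u << PAGE_BITS) - 1))
    #define TPD_PAGES 64
    #define TBD_MASK  (PAGE_MASK * TPD_PAGES)   /* same trick as the patch */

    int main(void)
    {
        uint64_t address = 0x12345678;
        uint64_t page   = address & PAGE_MASK;  /* containing page */
        uint64_t region = address & TBD_MASK;   /* 64-page chunk base */
        uint64_t last   = region | ~TBD_MASK;   /* chunk's last byte */
        uint64_t slot   = (page - region) >> PAGE_BITS; /* index in data[] */

        printf("page=%#" PRIx64 " region=%#" PRIx64
               " last=%#" PRIx64 " slot=%" PRIu64 "\n",
               page, region, last, slot);
        assert(slot < TPD_PAGES);   /* here: slot 5 of region 0x12340000 */
        return 0;
    }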
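The lookup in page_get_target_data() probes the tree once without the lock
and, only on a miss, re-probes under mmap_lock() before allocating. A minimal
sketch of that double-checked pattern under stated assumptions: a toy
one-entry "tree", a pthread mutex standing in for mmap_lock(), and GCC/Clang
__atomic builtins; none of these names are QEMU's.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for the interval tree: lockless readers of the single
     * slot may miss a concurrent insert (false negative) but can never
     * observe a bogus node (no false positives). */
    typedef struct Node { unsigned long key; } Node;
    static Node *slot;
    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

    static Node *tree_lookup(unsigned long key)
    {
        Node *n = __atomic_load_n(&slot, __ATOMIC_ACQUIRE);
        return (n && n->key == key) ? n : NULL;
    }

    static Node *lookup_or_create(unsigned long key)
    {
        Node *n = tree_lookup(key);      /* lockless: a hit is final */
        if (!n) {
            pthread_mutex_lock(&tree_lock);
            n = tree_lookup(key);        /* re-check: racing inserter? */
            if (!n) {
                n = calloc(1, sizeof(*n));
                n->key = key;
                __atomic_store_n(&slot, n, __ATOMIC_RELEASE);
            }
            pthread_mutex_unlock(&tree_lock);
        }
        return n;
    }

    int main(void)
    {
        Node *a = lookup_or_create(42);
        Node *b = lookup_or_create(42);  /* second call reuses the node */
        printf("same node: %s\n", a == b ? "yes" : "no");
        return 0;
    }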