accel/tcg: Use g_free_rcu for user-exec interval trees
Because we allow lockless lookups, we have to be careful about when each interval-tree node is freed. Use RCU to delay the free until it is safe. Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
1b660f42ef
commit
177a8cb83b
|
@ -22,6 +22,7 @@
|
||||||
#include "exec/exec-all.h"
|
#include "exec/exec-all.h"
|
||||||
#include "tcg/tcg.h"
|
#include "tcg/tcg.h"
|
||||||
#include "qemu/bitops.h"
|
#include "qemu/bitops.h"
|
||||||
|
#include "qemu/rcu.h"
|
||||||
#include "exec/cpu_ldst.h"
|
#include "exec/cpu_ldst.h"
|
||||||
#include "exec/translate-all.h"
|
#include "exec/translate-all.h"
|
||||||
#include "exec/helper-proto.h"
|
#include "exec/helper-proto.h"
|
||||||
|
@ -136,6 +137,7 @@ bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
|
||||||
}
|
}
|
||||||
|
|
||||||
typedef struct PageFlagsNode {
|
typedef struct PageFlagsNode {
|
||||||
|
struct rcu_head rcu;
|
||||||
IntervalTreeNode itree;
|
IntervalTreeNode itree;
|
||||||
int flags;
|
int flags;
|
||||||
} PageFlagsNode;
|
} PageFlagsNode;
|
||||||
|
@ -266,7 +268,7 @@ static bool pageflags_unset(target_ulong start, target_ulong last)
|
||||||
}
|
}
|
||||||
} else if (p_last <= last) {
|
} else if (p_last <= last) {
|
||||||
/* Range completely covers node -- remove it. */
|
/* Range completely covers node -- remove it. */
|
||||||
g_free(p);
|
g_free_rcu(p, rcu);
|
||||||
} else {
|
} else {
|
||||||
/* Truncate the node from the start. */
|
/* Truncate the node from the start. */
|
||||||
p->itree.start = last + 1;
|
p->itree.start = last + 1;
|
||||||
|
@ -311,7 +313,7 @@ static void pageflags_create_merge(target_ulong start, target_ulong last,
|
||||||
if (prev) {
|
if (prev) {
|
||||||
if (next) {
|
if (next) {
|
||||||
prev->itree.last = next->itree.last;
|
prev->itree.last = next->itree.last;
|
||||||
g_free(next);
|
g_free_rcu(next, rcu);
|
||||||
} else {
|
} else {
|
||||||
prev->itree.last = last;
|
prev->itree.last = last;
|
||||||
}
|
}
|
||||||
|
@ -376,7 +378,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
|
||||||
p->flags = merge_flags;
|
p->flags = merge_flags;
|
||||||
} else {
|
} else {
|
||||||
interval_tree_remove(&p->itree, &pageflags_root);
|
interval_tree_remove(&p->itree, &pageflags_root);
|
||||||
g_free(p);
|
g_free_rcu(p, rcu);
|
||||||
}
|
}
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
@ -421,7 +423,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
|
||||||
p->flags = merge_flags;
|
p->flags = merge_flags;
|
||||||
} else {
|
} else {
|
||||||
interval_tree_remove(&p->itree, &pageflags_root);
|
interval_tree_remove(&p->itree, &pageflags_root);
|
||||||
g_free(p);
|
g_free_rcu(p, rcu);
|
||||||
}
|
}
|
||||||
if (p_last < last) {
|
if (p_last < last) {
|
||||||
start = p_last + 1;
|
start = p_last + 1;
|
||||||
|
@ -462,7 +464,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
|
||||||
p->itree.start = last + 1;
|
p->itree.start = last + 1;
|
||||||
interval_tree_insert(&p->itree, &pageflags_root);
|
interval_tree_insert(&p->itree, &pageflags_root);
|
||||||
} else {
|
} else {
|
||||||
g_free(p);
|
g_free_rcu(p, rcu);
|
||||||
goto restart;
|
goto restart;
|
||||||
}
|
}
|
||||||
if (set_flags) {
|
if (set_flags) {
|
||||||
|
@ -779,6 +781,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
|
||||||
#define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)
|
#define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)
|
||||||
|
|
||||||
typedef struct TargetPageDataNode {
|
typedef struct TargetPageDataNode {
|
||||||
|
struct rcu_head rcu;
|
||||||
IntervalTreeNode itree;
|
IntervalTreeNode itree;
|
||||||
char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
|
char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
|
||||||
} TargetPageDataNode;
|
} TargetPageDataNode;
|
||||||
|
@ -801,11 +804,11 @@ void page_reset_target_data(target_ulong start, target_ulong end)
|
||||||
n = next,
|
n = next,
|
||||||
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
|
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
|
||||||
target_ulong n_start, n_last, p_ofs, p_len;
|
target_ulong n_start, n_last, p_ofs, p_len;
|
||||||
TargetPageDataNode *t;
|
TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
|
||||||
|
|
||||||
if (n->start >= start && n->last <= last) {
|
if (n->start >= start && n->last <= last) {
|
||||||
interval_tree_remove(n, &targetdata_root);
|
interval_tree_remove(n, &targetdata_root);
|
||||||
g_free(n);
|
g_free_rcu(t, rcu);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -819,7 +822,6 @@ void page_reset_target_data(target_ulong start, target_ulong end)
|
||||||
n_last = MIN(last, n->last);
|
n_last = MIN(last, n->last);
|
||||||
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
|
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
|
||||||
|
|
||||||
t = container_of(n, TargetPageDataNode, itree);
|
|
||||||
memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
|
memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue