tcg: Do not set guard pages on the rx portion of code_gen_buffer
The rw portion of the buffer is the only one into which overruns can be generated. Allow the rx portion to be more completely covered by huge pages. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Tested-by: Roman Bolshakov <r.bolshakov@yadro.com> Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com> Message-Id: <20210320165720.1813545-2-richard.henderson@linaro.org>
This commit is contained in:
parent
2664699471
commit
15c4e8fe44
12
tcg/tcg.c
12
tcg/tcg.c
@@ -828,7 +828,6 @@ void tcg_region_init(void)
|
||||
size_t region_size;
|
||||
size_t n_regions;
|
||||
size_t i;
|
||||
uintptr_t splitwx_diff;
|
||||
|
||||
n_regions = tcg_n_regions();
|
||||
|
||||
@@ -858,8 +857,11 @@ void tcg_region_init(void)
|
||||
/* account for that last guard page */
|
||||
region.end -= page_size;
|
||||
|
||||
/* set guard pages */
|
||||
splitwx_diff = tcg_splitwx_diff;
|
||||
/*
|
||||
* Set guard pages in the rw buffer, as that's the one into which
|
||||
* buffer overruns could occur. Do not set guard pages in the rx
|
||||
* buffer -- let that one use hugepages throughout.
|
||||
*/
|
||||
for (i = 0; i < region.n; i++) {
|
||||
void *start, *end;
|
||||
int rc;
|
||||
@@ -867,10 +869,6 @@ void tcg_region_init(void)
|
||||
tcg_region_bounds(i, &start, &end);
|
||||
rc = qemu_mprotect_none(end, page_size);
|
||||
g_assert(!rc);
|
||||
if (splitwx_diff) {
|
||||
rc = qemu_mprotect_none(end + splitwx_diff, page_size);
|
||||
g_assert(!rc);
|
||||
}
|
||||
}
|
||||
|
||||
tcg_region_trees_init();
|
||||
|
Loading…
Reference in New Issue
Block a user