/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H

#include "exec/cpu-defs.h"
#include "exec/exec-all.h"
#include "qemu/xxhash.h"
#include "tb-jmp-cache.h"
#ifdef CONFIG_SOFTMMU

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page. The top bits are the same. This allows
   TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
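
/*
 * Illustrative sketch, not part of the original header: since only the bottom
 * TB_JMP_PAGE_BITS of the index vary for addresses on the same guest page,
 * all jump cache entries for one page occupy a single contiguous run of
 * TB_JMP_PAGE_SIZE slots starting at tb_jmp_cache_hash_page(addr), so a
 * per-page TLB flush can drop them with one loop. The helper name and the
 * jmp_cache argument (a stand-in for the per-CPU jump cache array) are
 * assumptions for illustration only.
 */
static inline void tb_jmp_cache_clear_page_sketch(void **jmp_cache,
                                                  target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        /* i0 has its low TB_JMP_PAGE_BITS clear, so this stays in range */
        jmp_cache[i0 + i] = NULL;
    }
}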

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

#else

/* In user-mode we can get better hashing because we do not have a TLB */
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}

#endif /* CONFIG_SOFTMMU */

/*
 * Rationale for the TB hash function below (from the commit that introduced
 * xxhash-based TB hashing):
 *
 * For some workloads such as arm bootup, the TB hash table is
 * performance-critical. This is due to the high frequency of accesses to the
 * hash table, caused by (frequent) TLB flushes that wipe out the cpu-private
 * tb_jmp_caches. More info:
 *   https://lists.nongnu.org/archive/html/qemu-devel/2016-03/msg05098.html
 *
 * Profiling an arm image booting debian jessie (modified to shut down
 * immediately after boot) showed that quite a bit of time was unnecessarily
 * spent in the hash table: poor hashing resulted in very uneven loading of
 * chains in the table's buckets, with the longest observed chain reaching
 * ~550 elements. Two changes address this:
 *
 * 1) Use xxhash as the hash table's hash function. xxhash is a fast,
 *    high-quality hashing function.
 * 2) Feed the hashing function with not just phys_pc, but also pc and flags.
 *    Hashing phys_pc alone left some buckets with many TBs while others got
 *    very few; with both changes, the longest observed chain on a single
 *    bucket drops from ~550 to ~40.
 *
 * cs_base is not hashed: tests show that the other element checked for in
 * tb_find_physical, cs_base, always matches when phys_pc+pc+flags match, so
 * hashing it would be wasteful. Per Richard Henderson, the cs_base field is
 * only used by i386 (in 16-bit modes) and sparc (for a TB consisting of only
 * a delay slot), so it may well be reasonable to ignore cs_base for hashing.
 *
 * This change gives consistent bootup time improvements: 11.6% less time on
 * an Intel Xeon E5-2690 and 19.2% less time on an Intel i7-4790K. Increasing
 * the number of hash buckets yields further improvements, but a larger fixed
 * number of buckets can degrade performance for workloads that do not
 * translate as many blocks (600K+ for the debian-jessie arm bootup).
 */
static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
                      uint32_t cf_mask, uint32_t trace_vcpu_dstate)
{
    return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
}
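
/*
 * Illustrative sketch, not part of the original header: the 32-bit hash is
 * typically reduced to a bucket index by masking against a power-of-two
 * table size. The helper name and the n_buckets parameter are assumptions
 * for illustration only.
 */
static inline uint32_t tb_hash_bucket_sketch(tb_page_addr_t phys_pc,
                                             target_ulong pc, uint32_t flags,
                                             uint32_t cf_mask,
                                             uint32_t trace_vcpu_dstate,
                                             uint32_t n_buckets)
{
    uint32_t h = tb_hash_func(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);

    /* n_buckets is assumed to be a power of two */
    return h & (n_buckets - 1);
}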
#endif