2003-08-10 23:35:13 +02:00
|
|
|
/*
|
|
|
|
* common defines for all CPUs
|
2007-09-16 23:08:06 +02:00
|
|
|
*
|
2003-08-10 23:35:13 +02:00
|
|
|
* Copyright (c) 2003 Fabrice Bellard
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2009-07-16 22:47:01 +02:00
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
2003-08-10 23:35:13 +02:00
|
|
|
*/
|
|
|
|
#ifndef CPU_DEFS_H
|
|
|
|
#define CPU_DEFS_H
|
|
|
|
|
2007-11-17 18:14:51 +01:00
|
|
|
#ifndef NEED_CPU_H
|
|
|
|
#error cpu.h included from common code
|
|
|
|
#endif
|
|
|
|
|
2016-03-15 15:36:13 +01:00
|
|
|
#include "qemu/host-utils.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/queue.h"
|
2015-05-05 09:18:23 +02:00
|
|
|
#include "tcg-target.h"
|
2013-05-28 14:02:38 +02:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2012-12-17 18:19:49 +01:00
|
|
|
#include "exec/hwaddr.h"
|
2013-05-28 14:02:38 +02:00
|
|
|
#endif
|
2015-04-26 17:49:24 +02:00
|
|
|
#include "exec/memattrs.h"
|
2003-08-10 23:35:13 +02:00
|
|
|
|
2004-01-24 16:26:06 +01:00
|
|
|
/* Each target must define TARGET_LONG_BITS (32 or 64, see below) before
 * pulling in this header; fail loudly rather than mis-sizing target_ulong.
 */
#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

/* Size in bytes of a target "long", i.e. of target_long/target_ulong. */
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
|
|
|
|
|
2004-04-25 23:25:15 +02:00
|
|
|
/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
/* printf format specifiers for a target_(u)long:
 * TARGET_FMT_lx = zero-padded hex, TARGET_FMT_ld = signed decimal,
 * TARGET_FMT_lu = unsigned decimal.
 */
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
/* 64-bit variants of the same format specifiers (PRI*64 from inttypes.h). */
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif
|
|
|
|
|
2010-03-12 17:54:58 +01:00
|
|
|
/* The softmmu TLB machinery below is only needed for full-system
 * emulation; user-mode emulation accesses host memory directly.
 * (This #if is closed after the CPU_COMMON_TLB definition below.)
 */
#if !defined(CONFIG_USER_ONLY)

/* use a fully associative victim tlb of 8 entries.  The victim TLB holds
 * translations evicted from the direct-mapped main TLB, giving extra
 * associativity before falling back to a full TLB refill.
 */
#define CPU_VTLB_SIZE 8
|
2003-08-10 23:35:13 +02:00
|
|
|
|
2010-04-05 01:28:53 +02:00
|
|
|
/* log2 of sizeof(CPUTLBEntry): 16 bytes when both host and target words
 * are 32-bit, 32 bytes otherwise.  Must match the dummy[] padding in
 * CPUTLBEntry; enforced by the QEMU_BUILD_BUG_ON below the struct.
 */
#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif
|
|
|
|
|
2015-05-05 09:18:23 +02:00
|
|
|
/* TCG_TARGET_TLB_DISPLACEMENT_BITS is used in CPU_TLB_BITS to ensure that
 * the TLB is not unnecessarily small, but still small enough for the
 * TLB lookup instruction sequence used by the TCG target.
 *
 * TCG will have to generate an operand as large as the distance between
 * env and the tlb_table[NB_MMU_MODES - 1][0].addend.  For simplicity,
 * the TCG targets just round everything up to the next power of two, and
 * count bits.  This works because: 1) the size of each TLB is a largish
 * power of two, 2) and because the limit of the displacement is really close
 * to a power of two, 3) the offset of tlb_table[0][0] inside env is smaller
 * than the size of a TLB.
 *
 * For example, the maximum displacement 0xFFF0 on PPC and MIPS, but TCG
 * just says "the displacement is 16 bits".  TCG_TARGET_TLB_DISPLACEMENT_BITS
 * then ensures that tlb_table at least 0x8000 bytes large ("not unnecessarily
 * small": 2^15).  The operand then will come up smaller than 0xFFF0 without
 * any particular care, because the TLB for a single MMU mode is larger than
 * 0x10000-0xFFF0=16 bytes.  In the end, the maximum value of the operand
 * could be something like 0xC000 (the offset of the last TLB table) plus
 * 0x18 (the offset of the addend field in each TLB entry) plus the offset
 * of tlb_table inside env (which is non-trivial but not huge).
 */
/* log2 of the number of entries per TLB: capped at 8 (256 entries), and
 * shrunk so that all NB_MMU_MODES TLBs together stay addressable within
 * the target's displacement budget.  The nested ternary computes
 * ceil(log2(NB_MMU_MODES)) for up to 16 modes.
 */
#define CPU_TLB_BITS                                             \
    MIN(8,                                                       \
        TCG_TARGET_TLB_DISPLACEMENT_BITS - CPU_TLB_ENTRY_BITS -  \
        (NB_MMU_MODES <= 1 ? 0 :                                 \
         NB_MMU_MODES <= 2 ? 1 :                                 \
         NB_MMU_MODES <= 4 ? 2 :                                 \
         NB_MMU_MODES <= 8 ? 3 : 4))

/* Number of entries in one (per-MMU-mode) TLB. */
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
|
|
|
|
|
2003-08-10 23:35:13 +02:00
|
|
|
/* One softmmu TLB entry: a tag comparator per access kind (read, write,
 * code fetch) plus the host-address addend, padded to a power-of-two size.
 */
typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
     */
    union {
        struct {
            target_ulong addr_read;
            target_ulong addr_write;
            target_ulong addr_code;
            /* Addend to virtual address to get host address.  IO accesses
               use the corresponding iotlb value.  */
            uintptr_t addend;
        };
        /* padding to get a power of two size */
        uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
    };
} CPUTLBEntry;

/* The padding must be exact: CPU_TLB_ENTRY_BITS (above) assumes an entry
 * is precisely 2^CPU_TLB_ENTRY_BITS bytes.
 */
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
|
2010-04-05 01:28:53 +02:00
|
|
|
|
2015-04-26 17:49:23 +02:00
|
|
|
/* The IOTLB is not accessed directly inline by generated TCG code,
 * so the CPUIOTLBEntry layout is not as critical as that of the
 * CPUTLBEntry.  (This is also why we don't want to combine the two
 * structs into one.)
 */
typedef struct CPUIOTLBEntry {
    hwaddr addr;        /* address for the IO access (hwaddr: see
                           exec/hwaddr.h) */
    MemTxAttrs attrs;   /* memory transaction attributes (see
                           exec/memattrs.h) */
} CPUIOTLBEntry;
|
|
|
|
|
2010-03-12 17:54:58 +01:00
|
|
|
/* Per-CPU softmmu TLB state, textually spliced into each target's
 * CPUArchState via CPU_COMMON.  Empty in user-mode builds (see #else).
 */
#define CPU_COMMON_TLB \
    /* The meaning of the MMU modes is defined in the target code. */   \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
    /* Fully associative victim TLB of entries evicted from tlb_table. */ \
    CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
    /* IOTLB arrays parallel to tlb_table / tlb_v_table above. */       \
    CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
    CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
    /* NOTE(review): names suggest these track the address range of a */ \
    /* pending/last TLB flush -- maintained by the cputlb code; confirm. */ \
    target_ulong tlb_flush_addr;                                        \
    target_ulong tlb_flush_mask;                                        \
    /* NOTE(review): presumably the next victim-TLB slot to fill; confirm. */ \
    target_ulong vtlb_index;                                            \

#else

/* User-mode emulation has no softmmu TLB. */
#define CPU_COMMON_TLB

#endif
|
|
|
|
|
|
|
|
|
2005-11-20 11:32:34 +01:00
|
|
|
/* Fields shared by every target's CPU state; each target expands this
 * macro inside its CPUxxxState struct definition.
 */
#define CPU_COMMON                                                      \
    /* soft mmu support */                                              \
    CPU_COMMON_TLB                                                      \
|
2005-11-20 11:32:34 +01:00
|
|
|
|
2003-08-10 23:35:13 +02:00
|
|
|
#endif
|