a1d1bb3101
This patch prepares the QEMU cpu_watchpoint/breakpoint API for the enhancements that follow in this series. First of all, it removes the MAX_BREAKPOINTS/MAX_WATCHPOINTS limits by switching to dynamically allocated data structures kept in linked lists. This also makes it possible to return a stable reference to the related objects, which the x86 debug register support introduced later in the series requires. Breakpoints and watchpoints are stored with their full information set plus an additional flags field, making them easily extensible for use beyond pure guest debugging.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5738 c046a42c-6fe2-441c-8c8c-71466251a162
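To illustrate what the list-based storage enables, an insert helper can hand back a stable pointer to a heap-allocated node. The sketch below is hypothetical: it only assumes the CPUBreakpoint layout and the env->breakpoints list head defined in the header underneath, plus QEMU's qemu_malloc allocator; the real helpers live in the patch body, not in this file.

    /* Hypothetical sketch: link a new breakpoint at the head of the
       per-CPU list and optionally return a stable reference to it. */
    int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                              CPUBreakpoint **breakpoint)
    {
        CPUBreakpoint *bp = qemu_malloc(sizeof(*bp));

        if (!bp)
            return -ENOMEM;
        bp->pc = pc;
        bp->flags = flags;
        bp->prev = NULL;
        bp->next = env->breakpoints;
        if (env->breakpoints)
            env->breakpoints->prev = bp;
        env->breakpoints = bp;
        if (breakpoint)
            *breakpoint = bp; /* stays valid: nodes live on the heap */
        return 0;
    }

Because the node is individually allocated rather than a slot in a fixed array, the returned pointer survives later insertions and removals, which is exactly what the debug register code needs.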
217 lines · 8.8 KiB · C
/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif

#include "config.h"
#include <setjmp.h>
#include <inttypes.h>
#include "osdep.h"

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

#ifndef TARGET_PHYS_ADDR_BITS
#if TARGET_LONG_BITS >= HOST_LONG_BITS
#define TARGET_PHYS_ADDR_BITS TARGET_LONG_BITS
#else
#define TARGET_PHYS_ADDR_BITS HOST_LONG_BITS
#endif
#endif
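
/* For instance, a 32-bit target on a 64-bit host takes the #else branch
   above and gets 64-bit physical addresses, so a host pointer always
   fits in target_phys_addr_t (see the comment on that type below). */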

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif

/* target_phys_addr_t is the type of a physical address (its size can
   be different from 'target_ulong'). We have
   sizeof(target_phys_addr_t) = max(sizeof(unsigned long),
   sizeof(size_of_target_physical_address)) because we must pass a
   host pointer to memory operations in some cases */

#if TARGET_PHYS_ADDR_BITS == 32
typedef uint32_t target_phys_addr_t;
#define TARGET_FMT_plx "%08x"
#elif TARGET_PHYS_ADDR_BITS == 64
typedef uint64_t target_phys_addr_t;
#define TARGET_FMT_plx "%016" PRIx64
#else
#error TARGET_PHYS_ADDR_BITS undefined
#endif

#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)

#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT       0x10001 /* hlt instruction reached */
#define EXCP_DEBUG     0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED    0x10003 /* cpu is halted (waiting for external event) */

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page. The top bits are the same. This allows
   TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
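
/* These masks are consumed by the tb_jmp_cache hash in exec-all.h,
   which (roughly paraphrased; see that header for the real code)
   mixes the page number and in-page offset along these lines:

       tmp  = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
       hash = ((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
              | (tmp & TB_JMP_ADDR_MASK);
*/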

#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)

#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
                                   go directly to ram.
       bit 3                     : indicates that the entry is invalid
       bit 2..0                  : zero
    */
    target_ulong addr_read;
    target_ulong addr_write;
    target_ulong addr_code;
    /* Addend to virtual address to get physical address. IO accesses
       use the corresponding iotlb value. */
#if TARGET_PHYS_ADDR_BITS == 64
    /* on i386 Linux make sure it is aligned */
    target_phys_addr_t addend __attribute__((aligned(8)));
#else
    target_phys_addr_t addend;
#endif
    /* padding to get a power of two size */
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                  (sizeof(target_ulong) * 3 +
                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                   sizeof(target_phys_addr_t))];
} CPUTLBEntry;
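
/* Worked example: with a 32-bit target and 32-bit physical addresses,
   the three address words take 3 * 4 = 12 bytes, no alignment fill is
   needed, and addend adds 4 more, i.e. 16 bytes total, so dummy[] is
   empty and the entry is exactly 1 << CPU_TLB_ENTRY_BITS == 16 bytes. */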

#ifdef WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
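
/* Both layouts keep u16.high overlapping the most significant half of
   the icount_decr.u32 field defined in CPU_COMMON below, regardless of
   host endianness, so the interrupt flag in the high bit can be raised
   by storing to u16.high alone. */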

struct kvm_run;
struct KVMState;

typedef struct CPUBreakpoint {
    target_ulong pc;
    int flags; /* BP_* */
    struct CPUBreakpoint *prev, *next;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    target_ulong vaddr;
    target_ulong len_mask;
    int flags; /* BP_* */
    struct CPUWatchpoint *prev, *next;
} CPUWatchpoint;
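
/* Both structures form intrusive, doubly linked lists headed by the
   breakpoints/watchpoints pointers in CPU_COMMON below. A hypothetical
   lookup sketch (helper name invented for illustration; it assumes
   len_mask clears the low address bits covered by the watchpoint):

       static CPUWatchpoint *find_watchpoint(CPUState *env, target_ulong addr)
       {
           CPUWatchpoint *wp;

           for (wp = env->watchpoints; wp != NULL; wp = wp->next)
               if ((addr & wp->len_mask) == wp->vaddr)
                   return wp;
           return NULL;
       }
*/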

#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON                                                      \
    struct TranslationBlock *current_tb; /* currently executing TB */   \
    /* soft mmu support */                                              \
    /* in order to avoid passing too many arguments to the MMIO         \
       helpers, we store some rarely used information in the CPU        \
       context */                                                       \
    unsigned long mem_io_pc; /* host pc at which the memory was         \
                                accessed */                             \
    target_ulong mem_io_vaddr; /* target virtual addr at which the      \
                                  memory was accessed */                \
    uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
    uint32_t interrupt_request;                                         \
    /* The meaning of the MMU modes is defined in the target code. */   \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];           \
    /* buffer for temporaries in the code generator */                  \
    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
                                                                        \
    int64_t icount_extra; /* Instructions until next timer event. */    \
    /* Number of cycles left, with interrupt flag in high bit.          \
       This allows a single read-compare-cbranch-write sequence to test \
       for both decrementer underflow and exceptions. */                \
    union {                                                             \
        uint32_t u32;                                                   \
        icount_decr_u16 u16;                                            \
    } icount_decr;                                                      \
    uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */      \
                                                                        \
    /* from this point: preserved by CPU reset */                       \
    /* ice debug support */                                             \
    CPUBreakpoint *breakpoints;                                         \
    int singlestep_enabled;                                             \
                                                                        \
    CPUWatchpoint *watchpoints;                                         \
    CPUWatchpoint *watchpoint_hit;                                      \
                                                                        \
    struct GDBRegisterState *gdb_regs;                                  \
                                                                        \
    /* Core interrupt code */                                           \
    jmp_buf jmp_env;                                                    \
    int exception_index;                                                \
                                                                        \
    int user_mode_only;                                                 \
                                                                        \
    void *next_cpu; /* next CPU sharing TB cache */                     \
    int cpu_index; /* CPU index (informative) */                        \
    int running; /* Nonzero if cpu is currently running (usermode). */  \
    /* user data */                                                     \
    void *opaque;                                                       \
                                                                        \
    const char *cpu_model_str;                                          \
    struct KVMState *kvm_state;                                         \
    struct kvm_run *kvm_run;                                            \
    int kvm_fd;
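
/* Illustration (not part of this header): each target embeds this block
   inside its CPUState structure, e.g.

       typedef struct CPUMyState {
           uint32_t regs[16];   // target-specific registers
           CPU_COMMON           // fields shared by all targets
           // more target-specific state ...
       } CPUMyState;

   so the common execution core can access these members through any
   target's env pointer. The struct name here is invented for the
   example. */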

#endif