Move softmmu tlb into CPUNegativeOffsetState

-----BEGIN PGP SIGNATURE-----
 
 iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAlz+ZDsdHHJpY2hhcmQu
 aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV90cgf/UEVw788bwedPMsoG
 IIwneWyJhuHUhEPcNfyvR192hIiAP31/CdCtJPA73f2l61ezT0izugvrYtpLY8Fq
 bRttfL9UQ6VE69pcriie/VZgXXZF7Gf1+cfIVs9eEs4qMnHx26ABSnF3jR8Qjytz
 4kSYPHQx0y1gGTYd96HiAt99v+KjcCTofXPMvFxpHEukHg2iWIMyiw+QBDwJ6rUb
 fP5grw5XIK/j4lL9gQkHUruUFW6FFThHsM+H7oGA7EaoRbzoL/kLLZwtx9zAsqYN
 JNg8984rHu5gq4H/K+f9WvTqEIYc91+EfEs1WxrWzUsi2khtF0iSbX7Usj9B/W7p
 225D9Q==
 =zsDC
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190610' into staging

Move softmmu tlb into CPUNegativeOffsetState

# gpg: Signature made Mon 10 Jun 2019 15:07:55 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190610: (39 commits)
  tcg/arm: Remove mostly unreachable tlb special case
  tcg/arm: Use LDRD to load tlb mask+table
  tcg/aarch64: Use LDP to load tlb mask+table
  cpu: Remove CPU_COMMON
  cpu: Move the softmmu tlb to CPUNegativeOffsetState
  cpu: Move icount_decr to CPUNegativeOffsetState
  cpu: Introduce CPUNegativeOffsetState
  cpu: Introduce cpu_set_cpustate_pointers
  cpu: Move ENV_OFFSET to exec/gen-icount.h
  target/xtensa: Use env_cpu, env_archcpu
  target/unicore32: Use env_cpu, env_archcpu
  target/tricore: Use env_cpu
  target/tilegx: Use env_cpu
  target/sparc: Use env_cpu, env_archcpu
  target/sh4: Use env_cpu, env_archcpu
  target/s390x: Use env_cpu, env_archcpu
  target/riscv: Use env_cpu, env_archcpu
  target/ppc: Use env_cpu, env_archcpu
  target/openrisc: Use env_cpu, env_archcpu
  target/nios2: Use env_cpu, env_archcpu
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2019-06-10 16:09:19 +01:00
commit a578cdfbdd
226 changed files with 2389 additions and 2573 deletions


@ -62,21 +62,21 @@
#define ATOMIC_TRACE_RMW do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, \
trace_guest_mem_before_exec(env_cpu(env), addr, info); \
trace_guest_mem_before_exec(env_cpu(env), addr, \
info | TRACE_MEM_ST); \
} while (0)
#define ATOMIC_TRACE_LD do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
trace_guest_mem_before_exec(env_cpu(env), addr, info); \
} while (0)
# define ATOMIC_TRACE_ST do { \
uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true); \
\
trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \
trace_guest_mem_before_exec(env_cpu(env), addr, info); \
} while (0)
/* Define host-endian atomic operations. Note that END is used within


@ -54,7 +54,7 @@ typedef struct SyncClocks {
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
int64_t cpu_icount;
@ -62,7 +62,7 @@ static void align_clocks(SyncClocks *sc, const CPUState *cpu)
return;
}
cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
sc->last_cpu_icount = cpu_icount;
@ -105,15 +105,15 @@ static void print_delay(const SyncClocks *sc)
}
}
static void init_delay_params(SyncClocks *sc,
const CPUState *cpu)
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
if (!icount_align_option) {
return;
}
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
sc->last_cpu_icount
= cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
if (sc->diff_clk < max_delay) {
max_delay = sc->diff_clk;
}
@ -467,7 +467,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
}
@ -525,7 +525,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
atomic_mb_set(&cpu->icount_decr.u16.high, 0);
atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
if (unlikely(atomic_read(&cpu->interrupt_request))) {
int interrupt_request;
@ -596,8 +596,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
/* Finally, check if we need to exit to the main loop. */
if (unlikely(atomic_read(&cpu->exit_request)
|| (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
if (unlikely(atomic_read(&cpu->exit_request))
|| (use_icount
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
atomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
@ -624,7 +625,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
}
*last_tb = NULL;
insns_left = atomic_read(&cpu->icount_decr.u32);
insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
if (insns_left < 0) {
/* Something asked us to stop executing chained TBs; just
* continue round the main loop. Whatever requested the exit
@ -643,7 +644,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
cpu_update_icount(cpu);
/* Refill decrementer and continue execution. */
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->icount_decr.u16.low = insns_left;
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
if (!cpu->icount_extra) {
/* Execute any remaining instructions, then let the main loop

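A note for readers tracing the icount changes above: the instruction budget for a vCPU slice is split between a 16-bit decrementer (at most 0xffff instructions per run) and icount_extra, and this merge only moves where the decrementer lives (cpu_neg(cpu)->icount_decr rather than cpu->icount_decr). A minimal standalone sketch of the refill and accounting logic, using simplified stand-in types rather than the real CPUState:

#include <stdint.h>

/* Simplified stand-ins for the QEMU fields touched above. */
typedef struct {
    uint16_t low;          /* instructions left for the current run */
    uint16_t high;         /* set to -1 to force an exit to the main loop */
} FakeIcountHalves;

typedef struct {
    FakeIcountHalves u16;
    int64_t icount_budget; /* total instructions allowed this slice */
    int64_t icount_extra;  /* part of the budget that does not fit in u16.low */
} FakeCPU;

/* Mirrors prepare_icount_for_run(): split the budget across the two fields. */
static void refill_decrementer(FakeCPU *cpu, int64_t budget)
{
    int64_t insns_left = budget < 0xffff ? budget : 0xffff;

    cpu->icount_budget = budget;
    cpu->u16.low = (uint16_t)insns_left;
    cpu->icount_extra = budget - insns_left;
}

/* Mirrors cpu_get_icount_executed(): instructions already executed. */
static int64_t icount_executed(const FakeCPU *cpu)
{
    return cpu->icount_budget - (cpu->u16.low + cpu->icount_extra);
}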

@ -76,14 +76,14 @@ QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}
static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
size_t max_entries)
{
window->begin_ns = ns;
window->max_entries = max_entries;
desc->window_begin_ns = ns;
desc->window_max_entries = max_entries;
}
static void tlb_dyn_init(CPUArchState *env)
@ -91,14 +91,14 @@ static void tlb_dyn_init(CPUArchState *env)
int i;
for (i = 0; i < NB_MMU_MODES; i++) {
CPUTLBDesc *desc = &env->tlb_d[i];
CPUTLBDesc *desc = &env_tlb(env)->d[i];
size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
tlb_window_reset(&desc->window, get_clock_realtime(), 0);
tlb_window_reset(desc, get_clock_realtime(), 0);
desc->n_used_entries = 0;
env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
}
}
@ -144,25 +144,25 @@ static void tlb_dyn_init(CPUArchState *env)
*/
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
size_t old_size = tlb_n_entries(env, mmu_idx);
size_t rate;
size_t new_size = old_size;
int64_t now = get_clock_realtime();
int64_t window_len_ms = 100;
int64_t window_len_ns = window_len_ms * 1000 * 1000;
bool window_expired = now > desc->window.begin_ns + window_len_ns;
bool window_expired = now > desc->window_begin_ns + window_len_ns;
if (desc->n_used_entries > desc->window.max_entries) {
desc->window.max_entries = desc->n_used_entries;
if (desc->n_used_entries > desc->window_max_entries) {
desc->window_max_entries = desc->n_used_entries;
}
rate = desc->window.max_entries * 100 / old_size;
rate = desc->window_max_entries * 100 / old_size;
if (rate > 70) {
new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
} else if (rate < 30 && window_expired) {
size_t ceil = pow2ceil(desc->window.max_entries);
size_t expected_rate = desc->window.max_entries * 100 / ceil;
size_t ceil = pow2ceil(desc->window_max_entries);
size_t expected_rate = desc->window_max_entries * 100 / ceil;
/*
* Avoid undersizing when the max number of entries seen is just below
@ -182,19 +182,19 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
if (new_size == old_size) {
if (window_expired) {
tlb_window_reset(&desc->window, now, desc->n_used_entries);
tlb_window_reset(desc, now, desc->n_used_entries);
}
return;
}
g_free(env->tlb_table[mmu_idx]);
g_free(env->iotlb[mmu_idx]);
g_free(env_tlb(env)->f[mmu_idx].table);
g_free(env_tlb(env)->d[mmu_idx].iotlb);
tlb_window_reset(&desc->window, now, 0);
tlb_window_reset(desc, now, 0);
/* desc->n_used_entries is cleared by the caller */
env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
/*
* If the allocations fail, try smaller sizes. We just freed some
* memory, so going back to half of new_size has a good chance of working.
@ -202,46 +202,47 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
* allocations to fail though, so we progressively reduce the allocation
* size, aborting if we cannot even allocate the smallest TLB we support.
*/
while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
while (env_tlb(env)->f[mmu_idx].table == NULL ||
env_tlb(env)->d[mmu_idx].iotlb == NULL) {
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
error_report("%s: %s", __func__, strerror(errno));
abort();
}
new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
g_free(env->tlb_table[mmu_idx]);
g_free(env->iotlb[mmu_idx]);
env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
g_free(env_tlb(env)->f[mmu_idx].table);
g_free(env_tlb(env)->d[mmu_idx].iotlb);
env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
}
}
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
tlb_mmu_resize_locked(env, mmu_idx);
memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
env->tlb_d[mmu_idx].n_used_entries = 0;
memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
env->tlb_d[mmu_idx].n_used_entries++;
env_tlb(env)->d[mmu_idx].n_used_entries++;
}
static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
env->tlb_d[mmu_idx].n_used_entries--;
env_tlb(env)->d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
qemu_spin_init(&env->tlb_c.lock);
qemu_spin_init(&env_tlb(env)->c.lock);
/* Ensure that cpu_reset performs a full flush. */
env->tlb_c.dirty = ALL_MMUIDX_BITS;
env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
tlb_dyn_init(env);
}
@ -273,9 +274,9 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
CPU_FOREACH(cpu) {
CPUArchState *env = cpu->env_ptr;
full += atomic_read(&env->tlb_c.full_flush_count);
part += atomic_read(&env->tlb_c.part_flush_count);
elide += atomic_read(&env->tlb_c.elide_flush_count);
full += atomic_read(&env_tlb(env)->c.full_flush_count);
part += atomic_read(&env_tlb(env)->c.part_flush_count);
elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
}
*pfull = full;
*ppart = part;
@ -285,10 +286,11 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
tlb_table_flush_by_mmuidx(env, mmu_idx);
memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
env->tlb_d[mmu_idx].large_page_addr = -1;
env->tlb_d[mmu_idx].large_page_mask = -1;
env->tlb_d[mmu_idx].vindex = 0;
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
env_tlb(env)->d[mmu_idx].vindex = 0;
memset(env_tlb(env)->d[mmu_idx].vtable, -1,
sizeof(env_tlb(env)->d[0].vtable));
}
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
@ -301,31 +303,31 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
qemu_spin_lock(&env->tlb_c.lock);
qemu_spin_lock(&env_tlb(env)->c.lock);
all_dirty = env->tlb_c.dirty;
all_dirty = env_tlb(env)->c.dirty;
to_clean = asked & all_dirty;
all_dirty &= ~to_clean;
env->tlb_c.dirty = all_dirty;
env_tlb(env)->c.dirty = all_dirty;
for (work = to_clean; work != 0; work &= work - 1) {
int mmu_idx = ctz32(work);
tlb_flush_one_mmuidx_locked(env, mmu_idx);
}
qemu_spin_unlock(&env->tlb_c.lock);
qemu_spin_unlock(&env_tlb(env)->c.lock);
cpu_tb_jmp_cache_clear(cpu);
if (to_clean == ALL_MMUIDX_BITS) {
atomic_set(&env->tlb_c.full_flush_count,
env->tlb_c.full_flush_count + 1);
atomic_set(&env_tlb(env)->c.full_flush_count,
env_tlb(env)->c.full_flush_count + 1);
} else {
atomic_set(&env->tlb_c.part_flush_count,
env->tlb_c.part_flush_count + ctpop16(to_clean));
atomic_set(&env_tlb(env)->c.part_flush_count,
env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
if (to_clean != asked) {
atomic_set(&env->tlb_c.elide_flush_count,
env->tlb_c.elide_flush_count +
atomic_set(&env_tlb(env)->c.elide_flush_count,
env_tlb(env)->c.elide_flush_count +
ctpop16(asked & ~to_clean));
}
}
@ -410,11 +412,12 @@ static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
target_ulong page)
{
CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
int k;
assert_cpu_is_self(ENV_GET_CPU(env));
assert_cpu_is_self(env_cpu(env));
for (k = 0; k < CPU_VTLB_SIZE; k++) {
if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
if (tlb_flush_entry_locked(&d->vtable[k], page)) {
tlb_n_used_entries_dec(env, mmu_idx);
}
}
@ -423,8 +426,8 @@ static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
static void tlb_flush_page_locked(CPUArchState *env, int midx,
target_ulong page)
{
target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
target_ulong lp_mask = env->tlb_d[midx].large_page_mask;
target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
/* Check if we need to flush due to large pages. */
if ((page & lp_mask) == lp_addr) {
@ -459,13 +462,13 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
addr, mmu_idx_bitmap);
qemu_spin_lock(&env->tlb_c.lock);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
tlb_flush_page_locked(env, mmu_idx, addr);
}
}
qemu_spin_unlock(&env->tlb_c.lock);
qemu_spin_unlock(&env_tlb(env)->c.lock);
tb_flush_jmp_cache(cpu, addr);
}
@ -609,22 +612,22 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
int mmu_idx;
env = cpu->env_ptr;
qemu_spin_lock(&env->tlb_c.lock);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
unsigned int i;
unsigned int n = tlb_n_entries(env, mmu_idx);
for (i = 0; i < n; i++) {
tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
length);
tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
start1, length);
}
for (i = 0; i < CPU_VTLB_SIZE; i++) {
tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
length);
tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
start1, length);
}
}
qemu_spin_unlock(&env->tlb_c.lock);
qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Called with tlb_c.lock held */
@ -646,7 +649,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
assert_cpu_is_self(cpu);
vaddr &= TARGET_PAGE_MASK;
qemu_spin_lock(&env->tlb_c.lock);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
}
@ -654,10 +657,10 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
int k;
for (k = 0; k < CPU_VTLB_SIZE; k++) {
tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
}
}
qemu_spin_unlock(&env->tlb_c.lock);
qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
@ -665,7 +668,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
target_ulong vaddr, target_ulong size)
{
target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
target_ulong lp_mask = ~(size - 1);
if (lp_addr == (target_ulong)-1) {
@ -675,13 +678,13 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
/* Extend the existing region to include the new page.
This is a compromise between unnecessary flushes and
the cost of maintaining a full variable size TLB. */
lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
while (((lp_addr ^ vaddr) & lp_mask) != 0) {
lp_mask <<= 1;
}
}
env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
env->tlb_d[mmu_idx].large_page_mask = lp_mask;
env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
@ -696,6 +699,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
int mmu_idx, target_ulong size)
{
CPUArchState *env = cpu->env_ptr;
CPUTLB *tlb = env_tlb(env);
CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section;
unsigned int index;
target_ulong address;
@ -757,10 +762,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* a longer critical section, but this is not a concern since the TLB lock
* is unlikely to be contended.
*/
qemu_spin_lock(&env->tlb_c.lock);
qemu_spin_lock(&tlb->c.lock);
/* Note that the tlb is no longer clean. */
env->tlb_c.dirty |= 1 << mmu_idx;
tlb->c.dirty |= 1 << mmu_idx;
/* Make sure there's no cached translation for the new page. */
tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
@ -770,12 +775,12 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* different page; otherwise just overwrite the stale data.
*/
if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
CPUTLBEntry *tv = &desc->vtable[vidx];
/* Evict the old entry into the victim tlb. */
copy_tlb_helper_locked(tv, te);
env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
desc->viotlb[vidx] = desc->iotlb[index];
tlb_n_used_entries_dec(env, mmu_idx);
}
@ -792,8 +797,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
* subtract here is that of the page base, and not the same as the
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
env->iotlb[mmu_idx][index].attrs = attrs;
desc->iotlb[index].addr = iotlb - vaddr_page;
desc->iotlb[index].attrs = attrs;
/* Now calculate the new entry */
tn.addend = addend - vaddr_page;
@ -829,7 +834,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
copy_tlb_helper_locked(te, &tn);
tlb_n_used_entries_inc(env, mmu_idx);
qemu_spin_unlock(&env->tlb_c.lock);
qemu_spin_unlock(&tlb->c.lock);
}
/* Add a new TLB entry, but without specifying the memory
@ -878,7 +883,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
MemoryRegionSection *section;
MemoryRegion *mr;
@ -922,7 +927,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, uint64_t val, target_ulong addr,
uintptr_t retaddr, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
MemoryRegionSection *section;
MemoryRegion *mr;
@ -974,23 +979,30 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
{
size_t vidx;
assert_cpu_is_self(ENV_GET_CPU(env));
assert_cpu_is_self(env_cpu(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
target_ulong cmp;
/* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
qemu_spin_lock(&env->tlb_c.lock);
qemu_spin_lock(&env_tlb(env)->c.lock);
copy_tlb_helper_locked(&tmptlb, tlb);
copy_tlb_helper_locked(tlb, vtlb);
copy_tlb_helper_locked(vtlb, &tmptlb);
qemu_spin_unlock(&env->tlb_c.lock);
qemu_spin_unlock(&env_tlb(env)->c.lock);
CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
tmpio = *io; *io = *vio; *vio = tmpio;
return true;
}
@ -1017,7 +1029,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
if (unlikely(!tlb_hit(entry->addr_code, addr))) {
if (!VICTIM_TLB_HIT(addr_code, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
@ -1055,7 +1067,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
if (!tlb_hit(tlb_addr_write(entry), addr)) {
/* TLB entry is for a different page */
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
}
}
@ -1089,7 +1101,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
uintptr_t index = tlb_index(env, mmu_idx, addr);
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cs);
if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
@ -1132,7 +1144,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Enforce guest required alignment. */
if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@ -1148,7 +1160,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Check TLB entry and enforce page permissions. */
if (!tlb_hit(tlb_addr, addr)) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
tlbe = tlb_entry(env, mmu_idx, addr);
@ -1165,7 +1177,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Let the guest notice RMW on a write-only page. */
if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
mmu_idx, retaddr);
/* Since we don't support reads and writes to different addresses,
and we do have the proper page loaded for write, this shouldn't
@ -1178,7 +1190,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
ndi->active = false;
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
ndi->active = true;
memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
qemu_ram_addr_from_host_nofail(hostaddr),
1 << s_bits);
}
@ -1186,7 +1198,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
return hostaddr;
stop_the_world:
cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
#ifdef TARGET_WORDS_BIGENDIAN
@ -1251,7 +1263,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
cpu_unaligned_access(env_cpu(env), addr, access_type,
mmu_idx, retaddr);
}
@ -1259,7 +1271,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(ENV_GET_CPU(env), addr, size,
tlb_fill(env_cpu(env), addr, size,
access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
@ -1280,7 +1292,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
* repeat the MMU check here. This tlb_fill() call might
* longjump out if this access should cause a guest exception.
*/
tlb_fill(ENV_GET_CPU(env), addr, size,
tlb_fill(env_cpu(env), addr, size,
access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
@ -1293,8 +1305,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
}
}
res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
retaddr, access_type, size);
res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
mmu_idx, addr, retaddr, access_type, size);
return handle_bswap(res, size, big_endian);
}
@ -1499,7 +1511,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@ -1507,7 +1519,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
@ -1528,7 +1540,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
* repeat the MMU check here. This tlb_fill() call might
* longjump out if this access should cause a guest exception.
*/
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
@ -1541,7 +1553,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
}
}
io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
handle_bswap(val, size, big_endian),
addr, retaddr, size);
return;
@ -1568,7 +1580,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
if (!tlb_hit_page(tlb_addr2, page2)
&& !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
page2 & TARGET_PAGE_MASK)) {
tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
mmu_idx, retaddr);
}

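Context for tlb_mmu_resize_locked() above: the resize heuristic itself is unchanged by this series; only the field names move (desc->window.begin_ns becomes desc->window_begin_ns, and mask/table move into env_tlb(env)->f[mmu_idx]). A standalone sketch of the grow/shrink decision, assuming the 70%/30% thresholds visible in the diff; the shrink branch is truncated above, so its clamping here is an approximation:

#include <stddef.h>
#include <stdbool.h>

/* pow2ceil() stand-in: smallest power of two >= n. */
static size_t pow2_ceil(size_t n)
{
    size_t p = 1;
    while (p < n) {
        p <<= 1;
    }
    return p;
}

/*
 * Pick a new TLB size from the current size and the maximum number of
 * entries observed during the 100 ms window, mirroring the rate > 70
 * (grow) and rate < 30 && window_expired (shrink) branches above.
 */
static size_t tlb_pick_size(size_t old_size, size_t window_max_entries,
                            bool window_expired, size_t max_size,
                            size_t min_size)
{
    size_t rate = window_max_entries * 100 / old_size;

    if (rate > 70) {
        size_t grown = old_size << 1;               /* busy: double */
        return grown < max_size ? grown : max_size;
    }
    if (rate < 30 && window_expired) {
        /* Quiet for a full window: shrink toward the observed maximum,
         * doubling once if that maximum sits just below a power of two
         * (the "avoid undersizing" case in the truncated comment). */
        size_t ceil = pow2_ceil(window_max_entries);
        size_t expected_rate = window_max_entries * 100 / ceil;
        size_t shrunk = expected_rate > 70 ? ceil << 1 : ceil;
        return shrunk > min_size ? shrunk : min_size;
    }
    return old_size;                                /* keep as is */
}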

@ -28,13 +28,12 @@
#include "sysemu/sysemu.h"
#include "qom/object.h"
#include "qemu-common.h"
#include "qom/cpu.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
unsigned long tcg_tb_size;
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
@ -51,7 +50,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
} else {
atomic_set(&cpu->icount_decr.u16.high, -1);
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
if (use_icount &&
!cpu->can_do_io
&& (mask & ~old_mask) != 0) {
@ -59,7 +58,6 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
}
}
}
#endif
static int tcg_init(MachineState *ms)
{


@ -146,7 +146,7 @@ uint64_t HELPER(ctpop_i64)(uint64_t arg)
void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
@ -165,5 +165,5 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
void HELPER(exit_atomic)(CPUArchState *env)
{
cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
cpu_loop_exit_atomic(env_cpu(env), GETPC());
}


@ -364,7 +364,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
assert(use_icount);
/* Reset the cycle counter to the start of the block
and shift it to the number of actually executed instructions */
cpu->icount_decr.u16.low += num_insns - i;
cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
}
restore_state_to_opc(env, tb, data);
@ -1732,7 +1732,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_func_start(tcg_ctx);
tcg_ctx->cpu = ENV_GET_CPU(env);
tcg_ctx->cpu = env_cpu(env);
gen_intermediate_code(cpu, tb, max_insns);
tcg_ctx->cpu = NULL;
@ -2200,7 +2200,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
if ((env->hflags & MIPS_HFLAG_BMASK) != 0
&& env->active_tc.PC != tb->pc) {
env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
cpu->icount_decr.u16.low++;
cpu_neg(cpu)->icount_decr.u16.low++;
env->hflags &= ~MIPS_HFLAG_BMASK;
n = 2;
}
@ -2208,7 +2208,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
&& env->pc != tb->pc) {
env->pc -= 2;
cpu->icount_decr.u16.low++;
cpu_neg(cpu)->icount_decr.u16.low++;
env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
n = 2;
}
@ -2382,7 +2382,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
cpu->interrupt_request |= mask;
atomic_set(&cpu->icount_decr.u16.high, -1);
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
/*


@ -680,7 +680,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
{
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
helper_retaddr = retaddr;
return g2h(addr);


@ -140,8 +140,7 @@ static void set_idt(int n, unsigned int dpl)
void cpu_loop(CPUX86State *env)
{
X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
int trapnr;
abi_ulong pc;
//target_siginfo_t info;
@ -487,7 +486,7 @@ static void flush_windows(CPUSPARCState *env)
void cpu_loop(CPUSPARCState *env)
{
CPUState *cs = CPU(sparc_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr, ret, syscall_nr;
//target_siginfo_t info;


@ -315,7 +315,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
abi_long ret;
void *p;
@ -413,7 +413,7 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
abi_long ret;
void *p;
@ -488,7 +488,7 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
abi_long ret;
void *p;

cpus.c

@ -239,7 +239,8 @@ void qemu_tcg_configure(QemuOpts *opts, Error **errp)
*/
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
return (cpu->icount_budget -
(cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
}
/*
@ -1389,12 +1390,12 @@ static void prepare_icount_for_run(CPUState *cpu)
* each vCPU execution. However u16.high can be raised
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
*/
g_assert(cpu->icount_decr.u16.low == 0);
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);
cpu->icount_budget = tcg_get_icount_limit();
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->icount_decr.u16.low = insns_left;
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
replay_mutex_lock();
@ -1408,7 +1409,7 @@ static void process_icount_data(CPUState *cpu)
cpu_update_icount(cpu);
/* Reset the counters */
cpu->icount_decr.u16.low = 0;
cpu_neg(cpu)->icount_decr.u16.low = 0;
cpu->icount_extra = 0;
cpu->icount_budget = 0;


@ -434,9 +434,9 @@ Can be used as:
/* trace emitted at this point */
trace_foo(0xd1);
/* trace emitted at this point */
trace_bar(ENV_GET_CPU(env), 0xd2);
trace_bar(env_cpu(env), 0xd2);
/* trace emitted at this point (env) and when guest code is executed (cpu_env) */
trace_baz_tcg(ENV_GET_CPU(env), cpu_env, 0xd3);
trace_baz_tcg(env_cpu(env), cpu_env, 0xd3);
}
If the translating vCPU has address 0xc1 and code is later executed by vCPU


@ -152,7 +152,7 @@ static void update_guest_rom_state(VAPICROMState *s)
static int find_real_tpr_addr(VAPICROMState *s, CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
hwaddr paddr;
target_ulong addr;
@ -279,7 +279,7 @@ instruction_ok:
static int update_rom_mapping(VAPICROMState *s, CPUX86State *env, target_ulong ip)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
hwaddr paddr;
uint32_t rom_state_vaddr;
uint32_t pos, patch, offset;


@ -406,7 +406,7 @@ uint64_t cpu_get_tsc(CPUX86State *env)
/* IRQ handling */
int cpu_get_pic_interrupt(CPUX86State *env)
{
X86CPU *cpu = x86_env_get_cpu(env);
X86CPU *cpu = env_archcpu(env);
int intno;
if (!kvm_irqchip_in_kernel()) {


@ -44,7 +44,7 @@ static void mips_gic_set_vp_irq(MIPSGICState *gic, int vp, int pin)
GIC_VP_MASK_CMP_SHF;
}
if (kvm_enabled()) {
kvm_mips_set_ipi_interrupt(mips_env_get_cpu(gic->vps[vp].env),
kvm_mips_set_ipi_interrupt(env_archcpu(gic->vps[vp].env),
pin + GIC_CPU_PIN_OFFSET,
ored_level);
} else {


@ -76,7 +76,7 @@ void cpu_mips_irq_init_cpu(MIPSCPU *cpu)
qemu_irq *qi;
int i;
qi = qemu_allocate_irqs(cpu_mips_irq_request, mips_env_get_cpu(env), 8);
qi = qemu_allocate_irqs(cpu_mips_irq_request, env_archcpu(env), 8);
for (i = 0; i < 8; i++) {
env->irq[i] = qi[i];
}


@ -54,12 +54,9 @@ static void nios2_pic_cpu_handler(void *opaque, int irq, int level)
void nios2_check_interrupts(CPUNios2State *env)
{
Nios2CPU *cpu = nios2_env_get_cpu(env);
CPUState *cs = CPU(cpu);
if (env->irq_pending) {
env->irq_pending = 0;
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HARD);
}
}


@ -385,7 +385,7 @@ void ppc40x_system_reset(PowerPCCPU *cpu)
void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
switch ((val >> 28) & 0x3) {
case 0x0:
@ -785,7 +785,7 @@ target_ulong cpu_ppc_load_decr(CPUPPCState *env)
target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
ppc_tb_t *tb_env = env->tb_env;
uint64_t hdecr;
@ -923,7 +923,7 @@ static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
int nr_bits = 32;
@ -955,7 +955,7 @@ static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
_cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
@ -980,7 +980,7 @@ static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
CPUPPCState *env = opaque;
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
ppc_tb_t *tb_env = env->tb_env;
tb_env->tb_freq = freq;
@ -1095,7 +1095,7 @@ const VMStateDescription vmstate_ppc_timebase = {
/* Set up (once) timebase frequency (in Hz) */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
ppc_tb_t *tb_env;
tb_env = g_malloc0(sizeof(ppc_tb_t));
@ -1165,7 +1165,7 @@ static void cpu_4xx_fit_cb (void *opaque)
uint64_t now, next;
env = opaque;
cpu = ppc_env_get_cpu(env);
cpu = env_archcpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@ -1235,7 +1235,7 @@ static void cpu_4xx_pit_cb (void *opaque)
ppc40x_timer_t *ppc40x_timer;
env = opaque;
cpu = ppc_env_get_cpu(env);
cpu = env_archcpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
env->spr[SPR_40x_TSR] |= 1 << 27;
@ -1261,7 +1261,7 @@ static void cpu_4xx_wdt_cb (void *opaque)
uint64_t now, next;
env = opaque;
cpu = ppc_env_get_cpu(env);
cpu = env_archcpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);


@ -49,7 +49,7 @@
ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
uint32_t flags)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
CPUState *cs = env_cpu(env);
ram_addr_t bdloc;
int i, n;


@ -249,7 +249,7 @@ static void booke_wdt_cb(void *opaque)
void store_booke_tsr(CPUPPCState *env, target_ulong val)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
ppc_tb_t *tb_env = env->tb_env;
booke_timer_t *booke_timer = tb_env->opaque;
@ -277,7 +277,7 @@ void store_booke_tsr(CPUPPCState *env, target_ulong val)
void store_booke_tcr(CPUPPCState *env, target_ulong val)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
PowerPCCPU *cpu = env_archcpu(env);
ppc_tb_t *tb_env = env->tb_env;
booke_timer_t *booke_timer = tb_env->opaque;


@ -40,7 +40,7 @@ int qemu_semihosting_log_out(const char *s, int len)
*/
static GString *copy_user_string(CPUArchState *env, target_ulong addr, int len)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
GString *s = g_string_sized_new(len ? len : 128);
uint8_t c;
bool done;


@ -159,7 +159,7 @@ static void leon3_set_pil_in(void *opaque, uint32_t pil_in)
env->interrupt_index = TT_EXTINT | i;
if (old_interrupt != env->interrupt_index) {
cs = CPU(sparc_env_get_cpu(env));
cs = env_cpu(env);
trace_leon3_set_irq(i);
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
}
@ -167,7 +167,7 @@ static void leon3_set_pil_in(void *opaque, uint32_t pil_in)
}
}
} else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) {
cs = CPU(sparc_env_get_cpu(env));
cs = env_cpu(env);
trace_leon3_reset_irq(env->interrupt_index & 15);
env->interrupt_index = 0;
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);


@ -166,7 +166,7 @@ void cpu_check_irqs(CPUSPARCState *env)
env->interrupt_index = TT_EXTINT | i;
if (old_interrupt != env->interrupt_index) {
cs = CPU(sparc_env_get_cpu(env));
cs = env_cpu(env);
trace_sun4m_cpu_interrupt(i);
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
}
@ -174,7 +174,7 @@ void cpu_check_irqs(CPUSPARCState *env)
}
}
} else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) {
cs = CPU(sparc_env_get_cpu(env));
cs = env_cpu(env);
trace_sun4m_cpu_reset_interrupt(env->interrupt_index & 15);
env->interrupt_index = 0;
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);


@ -46,7 +46,7 @@ void cpu_check_irqs(CPUSPARCState *env)
if (env->ivec_status & 0x20) {
return;
}
cs = CPU(sparc_env_get_cpu(env));
cs = env_cpu(env);
/* check if TM or SM in SOFTINT are set
setting these also causes interrupt 14 */
if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {


@ -56,7 +56,7 @@ static void puv3_soc_init(CPUUniCore32State *env)
/* Initialize interrupt controller */
cpu_intc = qemu_allocate_irq(puv3_intc_cpu_handler,
uc32_env_get_cpu(env), 0);
env_archcpu(env), 0);
dev = sysbus_create_simple("puv3_intc", PUV3_INTC_BASE, cpu_intc);
for (i = 0; i < PUV3_IRQS_NR; i++) {
irqs[i] = qdev_get_gpio_in(dev, i);


@ -33,7 +33,7 @@
void check_interrupts(CPUXtensaState *env)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int minlevel = xtensa_get_cintlevel(env);
uint32_t int_set_enabled = env->sregs[INTSET] & env->sregs[INTENABLE];
int level;


@ -371,4 +371,73 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
int cpu_exec(CPUState *cpu);
/**
* cpu_set_cpustate_pointers(cpu)
* @cpu: The cpu object
*
* Set the generic pointers in CPUState into the outer object.
*/
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
cpu->parent_obj.env_ptr = &cpu->env;
cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}
/**
* env_archcpu(env)
* @env: The architecture environment
*
* Return the ArchCPU associated with the environment.
*/
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
return container_of(env, ArchCPU, env);
}
/**
* env_cpu(env)
* @env: The architecture environment
*
* Return the CPUState associated with the environment.
*/
static inline CPUState *env_cpu(CPUArchState *env)
{
return &env_archcpu(env)->parent_obj;
}
/**
* env_neg(env)
* @env: The architecture environment
*
* Return the CPUNegativeOffsetState associated with the environment.
*/
static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
{
ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
return &arch_cpu->neg;
}
/**
* cpu_neg(cpu)
* @cpu: The generic CPUState
*
* Return the CPUNegativeOffsetState associated with the cpu.
*/
static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
{
ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
return &arch_cpu->neg;
}
/**
* env_tlb(env)
* @env: The architecture environment
*
* Return the CPUTLB state associated with the environment.
*/
static inline CPUTLB *env_tlb(CPUArchState *env)
{
return &env_neg(env)->tlb;
}
#endif /* CPU_ALL_H */

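The accessors above all rely on one layout convention, spelled out in cpu-defs.h below: each target's ArchCPU starts with the QOM CPUState and places a CPUNegativeOffsetState field named "neg" immediately before its CPUArchState field named "env". A minimal sketch of why container_of() then recovers the outer objects, using a hypothetical FooCPU target:

#include <stddef.h>

/* Simplified stand-ins for the real QEMU types. */
typedef struct CPUState { int dummy; } CPUState;
typedef struct CPUNegativeOffsetState { int dummy; } CPUNegativeOffsetState;
typedef struct CPUFooState { int regs[16]; } CPUFooState;

/* The layout that env_archcpu()/env_cpu()/env_neg() assume. */
typedef struct FooCPU {
    CPUState parent_obj;         /* QOM parent must come first */
    CPUNegativeOffsetState neg;  /* immediately before env */
    CPUFooState env;
} FooCPU;

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Analogue of env_archcpu(): step back from &foo->env to foo. */
static FooCPU *foo_env_archcpu(CPUFooState *env)
{
    return container_of(env, FooCPU, env);
}

/* Analogue of env_cpu(): the CPUState is the first member. */
static CPUState *foo_env_cpu(CPUFooState *env)
{
    return &foo_env_archcpu(env)->parent_obj;
}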

@ -33,9 +33,30 @@
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
#include "qom/cpu.h"
#include "cpu-param.h"
#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
#ifndef NB_MMU_MODES
# error NB_MMU_MODES must be defined in cpu-param.h
#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_VIRT_ADDR_SPACE_BITS
# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_PAGE_BITS
# ifdef TARGET_PAGE_BITS_VARY
# ifndef TARGET_PAGE_BITS_MIN
# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
# endif
# else
# error TARGET_PAGE_BITS must be defined in cpu-param.h
# endif
#endif
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
@ -58,6 +79,7 @@ typedef uint64_t target_ulong;
#endif
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8
@ -127,18 +149,10 @@ typedef struct CPUIOTLBEntry {
MemTxAttrs attrs;
} CPUIOTLBEntry;
/**
* struct CPUTLBWindow
* @begin_ns: host time (in ns) at the beginning of the time window
* @max_entries: maximum number of entries observed in the window
*
* See also: tlb_mmu_resize_locked()
/*
* Data elements that are per MMU mode, minus the bits accessed by
* the TCG fast path.
*/
typedef struct CPUTLBWindow {
int64_t begin_ns;
size_t max_entries;
} CPUTLBWindow;
typedef struct CPUTLBDesc {
/*
* Describe a region covering all of the large pages allocated
@ -148,17 +162,36 @@ typedef struct CPUTLBDesc {
*/
target_ulong large_page_addr;
target_ulong large_page_mask;
/* host time (in ns) at the beginning of the time window */
int64_t window_begin_ns;
/* maximum number of entries observed in the window */
size_t window_max_entries;
size_t n_used_entries;
/* The next index to use in the tlb victim table. */
size_t vindex;
CPUTLBWindow window;
size_t n_used_entries;
/* The tlb victim table, in two parts. */
CPUTLBEntry vtable[CPU_VTLB_SIZE];
CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
/* The iotlb. */
CPUIOTLBEntry *iotlb;
} CPUTLBDesc;
/*
* Data elements that are per MMU mode, accessed by the fast path.
* The structure is aligned to aid loading the pair with one insn.
*/
typedef struct CPUTLBDescFast {
/* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
uintptr_t mask;
/* The array of tlb entries itself. */
CPUTLBEntry *table;
} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
/*
* Data elements that are shared between all MMU modes.
*/
typedef struct CPUTLBCommon {
/* Serialize updates to tlb_table and tlb_v_table, and others as noted. */
/* Serialize updates to f.table and d.vtable, and others as noted. */
QemuSpin lock;
/*
* Within dirty, for each bit N, modifications have been made to
@ -176,35 +209,35 @@ typedef struct CPUTLBCommon {
size_t elide_flush_count;
} CPUTLBCommon;
# define CPU_TLB \
/* tlb_mask[i] contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ \
uintptr_t tlb_mask[NB_MMU_MODES]; \
CPUTLBEntry *tlb_table[NB_MMU_MODES];
# define CPU_IOTLB \
CPUIOTLBEntry *iotlb[NB_MMU_MODES];
/*
* The entire softmmu tlb, for all MMU modes.
* The meaning of each of the MMU modes is defined in the target code.
* Note that NB_MMU_MODES is not yet defined; we can only reference it
* within preprocessor defines that will be expanded later.
* Since this is placed within CPUNegativeOffsetState, the smallest
* negative offsets are at the end of the struct.
*/
#define CPU_COMMON_TLB \
CPUTLBCommon tlb_c; \
CPUTLBDesc tlb_d[NB_MMU_MODES]; \
CPU_TLB \
CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
CPU_IOTLB \
CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];
typedef struct CPUTLB {
CPUTLBCommon c;
CPUTLBDesc d[NB_MMU_MODES];
CPUTLBDescFast f[NB_MMU_MODES];
} CPUTLB;
/* This will be used by TCG backends to compute offsets. */
#define TLB_MASK_TABLE_OFS(IDX) \
((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
#else
#define CPU_COMMON_TLB
#endif
#define CPU_COMMON \
/* soft mmu support */ \
CPU_COMMON_TLB \
typedef struct CPUTLB { } CPUTLB;
#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
/*
 * This structure must be placed in ArchCPU immediately
* before CPUArchState, as a field named "neg".
*/
typedef struct CPUNegativeOffsetState {
CPUTLB tlb;
IcountDecr icount_decr;
} CPUNegativeOffsetState;
#endif

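A side note on CPUTLBDescFast: packing exactly the two words the fast path needs (mask and table) into a pair aligned to 2 * sizeof(void *) is what lets the tcg/aarch64 LDP and tcg/arm LDRD patches in this pull fetch both with a single load. A sketch of the fast-path lookup against such a pair, with illustrative constants (TARGET_PAGE_BITS and the entry size are target-dependent):

#include <stdint.h>

#define FAKE_PAGE_BITS      12  /* illustrative: 4 KiB target pages */
#define FAKE_TLB_ENTRY_BITS 5   /* illustrative: log2 of the entry size */

typedef struct {
    uint64_t addr_read, addr_write, addr_code, addend;  /* simplified */
} FakeTLBEntry;

/* Mirror of CPUTLBDescFast: one aligned mask+table pair per MMU mode. */
typedef struct {
    uintptr_t mask;       /* (n_entries - 1) << FAKE_TLB_ENTRY_BITS */
    FakeTLBEntry *table;
} __attribute__((aligned(2 * sizeof(void *)))) FakeTLBFast;

/*
 * Compute the entry address the way TCG backends do: shift the address
 * so the page index lands at entry-size granularity, then mask.
 */
static FakeTLBEntry *fast_lookup(const FakeTLBFast *f, uint64_t addr)
{
    uintptr_t ofs = (addr >> (FAKE_PAGE_BITS - FAKE_TLB_ENTRY_BITS)) & f->mask;
    return (FakeTLBEntry *)((char *)f->table + ofs);
}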

@ -139,21 +139,21 @@ static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
{
uintptr_t size_mask = env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS;
uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
return (addr >> TARGET_PAGE_BITS) & size_mask;
}
static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
{
return (env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS) + 1;
return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
}
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
{
return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)];
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
#ifdef MMU_MODE0_SUFFIX

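A quick worked example of tlb_index() above, assuming a hypothetical target with TARGET_PAGE_BITS = 12, CPU_TLB_ENTRY_BITS = 5 and a 256-entry TLB: mask = (256 - 1) << 5 = 0x1fe0, so size_mask = 0x1fe0 >> 5 = 0xff, and for addr = 0x12345678 the index is (0x12345678 >> 12) & 0xff = 0x12345 & 0xff = 0x45. The same masking, applied before shifting out CPU_TLB_ENTRY_BITS, is what the TCG backends do after loading the mask+table pair.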

@ -89,7 +89,7 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#if !defined(SOFTMMU_CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, false));
#endif
@ -128,7 +128,7 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#if !defined(SOFTMMU_CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, true, MO_TE, false));
#endif
@ -170,7 +170,7 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#if !defined(SOFTMMU_CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, true));
#endif


@ -66,7 +66,7 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, false));
#endif
return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
@ -90,7 +90,7 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, true, MO_TE, false));
#endif
return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
@ -116,7 +116,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
{
#if !defined(CODE_ACCESS)
trace_guest_mem_before_exec(
ENV_GET_CPU(env), ptr,
env_cpu(env), ptr,
trace_mem_build_info(SHIFT, false, MO_TE, true));
#endif
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);


@ -19,7 +19,8 @@ static inline void gen_tb_start(TranslationBlock *tb)
}
tcg_gen_ld_i32(count, cpu_env,
-ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
offsetof(ArchCPU, neg.icount_decr.u32) -
offsetof(ArchCPU, env));
if (tb_cflags(tb) & CF_USE_ICOUNT) {
imm = tcg_temp_new_i32();
@ -37,7 +38,8 @@ static inline void gen_tb_start(TranslationBlock *tb)
if (tb_cflags(tb) & CF_USE_ICOUNT) {
tcg_gen_st16_i32(count, cpu_env,
-ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
offsetof(ArchCPU, neg.icount_decr.u16.low) -
offsetof(ArchCPU, env));
}
tcg_temp_free_i32(count);
@ -58,14 +60,18 @@ static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
static inline void gen_io_start(void)
{
TCGv_i32 tmp = tcg_const_i32(1);
tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
tcg_gen_st_i32(tmp, cpu_env,
offsetof(ArchCPU, parent_obj.can_do_io) -
offsetof(ArchCPU, env));
tcg_temp_free_i32(tmp);
}
static inline void gen_io_end(void)
{
TCGv_i32 tmp = tcg_const_i32(0);
tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
tcg_gen_st_i32(tmp, cpu_env,
offsetof(ArchCPU, parent_obj.can_do_io) -
offsetof(ArchCPU, env));
tcg_temp_free_i32(tmp);
}


@ -14,7 +14,7 @@ static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
{
uint64_t val;
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 0);
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0);
return tswap64(val);
}
@ -22,7 +22,7 @@ static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
{
uint32_t val;
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 0);
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0);
return tswap32(val);
}
@ -30,7 +30,7 @@ static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
{
uint8_t val;
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &val, 1, 0);
cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0);
return val;
}
@ -43,14 +43,14 @@ static inline void softmmu_tput64(CPUArchState *env,
target_ulong addr, uint64_t val)
{
val = tswap64(val);
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 1);
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1);
}
static inline void softmmu_tput32(CPUArchState *env,
target_ulong addr, uint32_t val)
{
val = tswap32(val);
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 1);
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1);
}
#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; })
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
@ -63,7 +63,7 @@ static void *softmmu_lock_user(CPUArchState *env,
/* TODO: Make this something that isn't fixed size. */
p = malloc(len);
if (p && copy) {
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 0);
cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0);
}
return p;
}
@ -79,7 +79,7 @@ static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
return NULL;
}
do {
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &c, 1, 0);
cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0);
addr++;
*(p++) = c;
} while (c);
@ -90,7 +90,7 @@ static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
target_ulong len)
{
if (len) {
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 1);
cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1);
}
free(p);
}


@ -232,17 +232,25 @@ typedef struct CPUClass {
bool gdb_stop_before_watchpoint;
} CPUClass;
/*
* Low 16 bits: number of cycles left, used only in icount mode.
* High 16 bits: Set to -1 to force TCG to stop executing linked TBs
* for this CPU and return to its top level loop (even in non-icount mode).
* This allows a single read-compare-cbranch-write sequence to test
* for both decrementer underflow and exceptions.
*/
typedef union IcountDecr {
uint32_t u32;
struct {
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
uint16_t high;
uint16_t low;
} icount_decr_u16;
uint16_t high;
uint16_t low;
#else
typedef struct icount_decr_u16 {
uint16_t low;
uint16_t high;
} icount_decr_u16;
uint16_t low;
uint16_t high;
#endif
} u16;
} IcountDecr;
typedef struct CPUBreakpoint {
vaddr pc;
@ -314,11 +322,6 @@ struct qemu_work_item;
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
* @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
* High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
* CPU and return to its top level loop (even in non-icount mode).
* This allows a single read-compare-cbranch-write sequence to test
* for both decrementer underflow and exceptions.
* @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
* requires that IO only be performed on the last instruction of a TB
* so that interrupts take effect immediately.
@ -328,6 +331,7 @@ struct qemu_work_item;
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
* @env_ptr: Pointer to subclass-specific CPUArchState field.
* @icount_decr_ptr: Pointer to IcountDecr field within subclass.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
@ -387,6 +391,7 @@ struct CPUState {
MemoryRegion *memory;
void *env_ptr; /* CPUArchState */
IcountDecr *icount_decr_ptr;
/* Accessed in parallel; all accesses must be atomic */
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
@ -441,15 +446,6 @@ struct CPUState {
bool ignore_memory_transaction_failures;
/* Note that this is accessed at the start of every TB via a negative
offset from AREG0. Leave this field at the end so as to make the
(absolute value) offset as small as possible. This reduces code
size, especially for hosts without large memory offsets. */
union {
uint32_t u32;
icount_decr_u16 u16;
} icount_decr;
struct hax_vcpu_state *hax_vcpu;
int hvf_fd;

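The IcountDecr comment above captures the trick that makes this move cheap: because u16.high is forced to -1 (0xffff) to request an exit, the whole 32-bit value reads as negative, so one decrement-and-branch in each generated TB covers both "icount budget exhausted" and "asynchronous exit requested". A simplified sketch of the icount-mode variant of that test, assuming the little-endian layout from the #else branch:

#include <stdint.h>
#include <stdbool.h>

typedef union {
    uint32_t u32;
    struct {
        uint16_t low;   /* cycles left, used only in icount mode */
        uint16_t high;  /* -1 forces an exit from chained TBs */
    } u16;              /* little-endian field order, as in the #else branch */
} FakeIcountDecr;

/*
 * Simplified mirror of the per-TB test gen_tb_start() emits in icount
 * mode: subtract the TB's instruction count from the 32-bit view and
 * leave if the result went negative.  high == 0xffff makes u32 negative
 * as an int32_t, so the same comparison also catches exit requests.
 */
static bool tb_should_exit(FakeIcountDecr *d, int insns_in_tb)
{
    int32_t count = (int32_t)d->u32 - insns_in_tb;

    d->u32 = (uint32_t)count;
    return count < 0;
}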

@ -73,7 +73,7 @@
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
abi_long ret;
target_siginfo_t info;
@ -150,8 +150,8 @@ void cpu_loop(CPUARMState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
struct image_info *info = ts->info;
int i;


@ -314,7 +314,7 @@ static int target_restore_sigframe(CPUARMState *env,
break;
case TARGET_SVE_MAGIC:
if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
if (!sve && size == sve_size) {
@ -433,7 +433,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
/* SVE state needs saving only if it exists. */
if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
sve_ofs = alloc_sigframe_space(sve_size, &layout);


@ -23,7 +23,7 @@
void cpu_loop(CPUAlphaState *env)
{
CPUState *cs = CPU(alpha_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
target_siginfo_t info;
abi_long sysret;


@ -206,7 +206,7 @@ do_kernel_trap(CPUARMState *env)
void cpu_loop(CPUARMState *env)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
unsigned int n, insn;
target_siginfo_t info;
@ -423,7 +423,7 @@ void cpu_loop(CPUARMState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
struct image_info *info = ts->info;
int i;


@ -24,7 +24,7 @@
#define EXCP_DUMP(env, fmt, ...) \
do { \
CPUState *cs = ENV_GET_CPU(env); \
CPUState *cs = env_cpu(env); \
fprintf(stderr, fmt , ## __VA_ARGS__); \
cpu_dump_state(cs, stderr, 0); \
if (qemu_log_separate()) { \


@ -23,7 +23,7 @@
void cpu_loop(CPUCRISState *env)
{
CPUState *cs = CPU(cris_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr, ret;
target_siginfo_t info;
@ -83,7 +83,7 @@ void cpu_loop(CPUCRISState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
struct image_info *info = ts->info;


@ -3377,7 +3377,7 @@ static int write_note(struct memelfnote *men, int fd)
static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
CPUState *cpu = env_cpu((CPUArchState *)env);
TaskState *ts = (TaskState *)cpu->opaque;
struct elf_thread_status *ets;
@ -3407,7 +3407,7 @@ static int fill_note_info(struct elf_note_info *info,
long signr, const CPUArchState *env)
{
#define NUMNOTES 3
CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
CPUState *cpu = env_cpu((CPUArchState *)env);
TaskState *ts = (TaskState *)cpu->opaque;
int i;
@ -3531,7 +3531,7 @@ static int write_note_info(struct elf_note_info *info, int fd)
*/
static int elf_core_dump(int signr, const CPUArchState *env)
{
const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
const CPUState *cpu = env_cpu((CPUArchState *)env);
const TaskState *ts = (const TaskState *)cpu->opaque;
struct vm_area_struct *vma = NULL;
char corefile[PATH_MAX];


@ -105,7 +105,7 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
void cpu_loop(CPUHPPAState *env)
{
CPUState *cs = CPU(hppa_env_get_cpu(env));
CPUState *cs = env_cpu(env);
target_siginfo_t info;
abi_ulong ret;
int trapnr;


@ -82,7 +82,7 @@ static void set_idt(int n, unsigned int dpl)
void cpu_loop(CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
abi_ulong pc;
abi_ulong ret;


@ -198,7 +198,7 @@ static void setup_sigcontext(struct target_sigcontext *sc,
struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
abi_ulong fpstate_addr)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
#ifndef TARGET_X86_64
uint16_t magic;


@ -91,7 +91,6 @@ static int translate_openflags(int flags)
#define ARG(x) tswap32(args[x])
void do_m68k_simcall(CPUM68KState *env, int nr)
{
M68kCPU *cpu = m68k_env_get_cpu(env);
uint32_t *args;
args = (uint32_t *)(unsigned long)(env->aregs[7] + 4);
@ -159,6 +158,6 @@ void do_m68k_simcall(CPUM68KState *env, int nr)
check_err(env, lseek(ARG(0), (int32_t)ARG(1), ARG(2)));
break;
default:
cpu_abort(CPU(cpu), "Unsupported m68k sim syscall %d\n", nr);
cpu_abort(env_cpu(env), "Unsupported m68k sim syscall %d\n", nr);
}
}


@ -23,7 +23,7 @@
void cpu_loop(CPUM68KState *env)
{
CPUState *cs = CPU(m68k_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
unsigned int n;
target_siginfo_t info;
@ -130,7 +130,7 @@ void cpu_loop(CPUM68KState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
struct image_info *info = ts->info;


@ -31,7 +31,7 @@ static inline void cpu_clone_regs(CPUM68KState *env, target_ulong newsp)
static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls)
{
CPUState *cs = CPU(m68k_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
ts->tp_value = newtls;


@ -180,7 +180,7 @@ void init_task_state(TaskState *ts)
CPUArchState *cpu_copy(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
CPUState *new_cpu = cpu_create(cpu_type);
CPUArchState *new_env = new_cpu->env_ptr;
CPUBreakpoint *bp;


@ -23,7 +23,7 @@
void cpu_loop(CPUMBState *env)
{
CPUState *cs = CPU(mb_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr, ret;
target_siginfo_t info;


@ -425,7 +425,7 @@ static int do_break(CPUMIPSState *env, target_siginfo_t *info,
void cpu_loop(CPUMIPSState *env)
{
CPUState *cs = CPU(mips_env_get_cpu(env));
CPUState *cs = env_cpu(env);
target_siginfo_t info;
int trapnr;
abi_long ret;
@ -654,7 +654,7 @@ error:
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
struct image_info *info = ts->info;
int i;


@ -23,7 +23,7 @@
void cpu_loop(CPUNios2State *env)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
Nios2CPU *cpu = NIOS2_CPU(cs);
target_siginfo_t info;
int trapnr, ret;


@ -23,7 +23,7 @@
void cpu_loop(CPUOpenRISCState *env)
{
CPUState *cs = CPU(openrisc_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
abi_long ret;
target_siginfo_t info;


@ -67,7 +67,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
void cpu_loop(CPUPPCState *env)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
CPUState *cs = env_cpu(env);
target_siginfo_t info;
int trapnr;
target_ulong ret;


@ -25,7 +25,7 @@
void cpu_loop(CPURISCVState *env)
{
CPUState *cs = CPU(riscv_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr, signum, sigcode;
target_ulong sigaddr;
target_ulong ret;
@ -116,7 +116,7 @@ void cpu_loop(CPURISCVState *env)
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
struct image_info *info = ts->info;


@ -26,7 +26,7 @@
void cpu_loop(CPUS390XState *env)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr, n, sig;
target_siginfo_t info;
target_ulong addr;


@ -23,7 +23,7 @@
void cpu_loop(CPUSH4State *env)
{
CPUState *cs = CPU(sh_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr, ret;
target_siginfo_t info;


@ -626,7 +626,7 @@ static void QEMU_NORETURN dump_core_and_abort(int target_sig)
int queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
trace_user_queue_signal(env, sig);
@ -651,7 +651,7 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
void *puc)
{
CPUArchState *env = thread_cpu->env_ptr;
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
int sig;
@ -842,7 +842,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
struct emulated_sigtable *k)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
abi_ulong handler;
sigset_t set;
target_sigset_t target_old_set;
@ -927,7 +927,7 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
void process_pending_signals(CPUArchState *cpu_env)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
int sig;
TaskState *ts = cpu->opaque;
sigset_t set;


@ -145,7 +145,7 @@ static void flush_windows(CPUSPARCState *env)
void cpu_loop (CPUSPARCState *env)
{
CPUState *cs = CPU(sparc_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
abi_long ret;
target_siginfo_t info;


@ -5484,7 +5484,7 @@ static void *clone_func(void *arg)
rcu_register_thread();
tcg_register_thread();
env = info->env;
cpu = ENV_GET_CPU(env);
cpu = env_cpu(env);
thread_cpu = cpu;
ts = (TaskState *)cpu->opaque;
info->tid = sys_gettid();
@ -5514,7 +5514,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
abi_ulong parent_tidptr, target_ulong newtls,
abi_ulong child_tidptr)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
int ret;
TaskState *ts;
CPUState *new_cpu;
@ -5547,7 +5547,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
new_env = cpu_copy(env);
/* Init regs that differ from the parent. */
cpu_clone_regs(new_env, newsp);
new_cpu = ENV_GET_CPU(new_env);
new_cpu = env_cpu(new_env);
new_cpu->opaque = ts;
ts->bprm = parent_ts->bprm;
ts->info = parent_ts->info;
@ -6654,7 +6654,7 @@ int host_to_target_waitstatus(int status)
static int open_self_cmdline(void *cpu_env, int fd)
{
CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
int i;
@ -6671,7 +6671,7 @@ static int open_self_cmdline(void *cpu_env, int fd)
static int open_self_maps(void *cpu_env, int fd)
{
CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
FILE *fp;
char *line = NULL;
@ -6720,7 +6720,7 @@ static int open_self_maps(void *cpu_env, int fd)
static int open_self_stat(void *cpu_env, int fd)
{
CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
abi_ulong start_stack = ts->info->start_stack;
int i;
@ -6757,7 +6757,7 @@ static int open_self_stat(void *cpu_env, int fd)
static int open_self_auxv(void *cpu_env, int fd)
{
CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len;
@ -7042,7 +7042,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
|| defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
@ -9781,10 +9781,10 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
* even though the current architectural maximum is VQ=16.
*/
ret = -TARGET_EINVAL;
if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
CPUARMState *env = cpu_env;
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint32_t vq, old_vq;
old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
@ -9801,7 +9801,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_PR_SVE_GET_VL:
ret = -TARGET_EINVAL;
{
ARMCPU *cpu = arm_env_get_cpu(cpu_env);
ARMCPU *cpu = env_archcpu(cpu_env);
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
}
@ -9810,7 +9810,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
case TARGET_PR_PAC_RESET_KEYS:
{
CPUARMState *env = cpu_env;
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (arg3 || arg4 || arg5) {
return -TARGET_EINVAL;
@ -11706,7 +11706,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
CPUState *cpu = env_cpu(cpu_env);
abi_long ret;
#ifdef DEBUG_ERESTARTSYS


@ -206,7 +206,7 @@ static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
void cpu_loop(CPUTLGState *env)
{
CPUState *cs = CPU(tilegx_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int trapnr;
while (1) {


@ -54,7 +54,7 @@ const char *cpu_to_uname_machine(void *cpu_env)
return "armv5te" utsname_suffix;
#elif defined(TARGET_I386) && !defined(TARGET_X86_64)
/* see arch/x86/kernel/cpu/bugs.c: check_bugs(), 386, 486, 586, 686 */
CPUState *cpu = ENV_GET_CPU((CPUX86State *)cpu_env);
CPUState *cpu = env_cpu((CPUX86State *)cpu_env);
int family = object_property_get_int(OBJECT(cpu), "family", NULL);
if (family == 4) {
return "i486";


@ -72,7 +72,7 @@ static inline unsigned int vm_getl(CPUX86State *env,
void save_v86_state(CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
struct target_vm86plus_struct * target_v86;
@ -132,7 +132,7 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
static inline int set_IF(CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
ts->v86flags |= VIF_MASK;
@ -145,7 +145,7 @@ static inline int set_IF(CPUX86State *env)
static inline void clear_IF(CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
ts->v86flags &= ~VIF_MASK;
@ -163,7 +163,7 @@ static inline void clear_AC(CPUX86State *env)
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
set_flags(ts->v86flags, eflags, ts->v86mask);
@ -177,7 +177,7 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
@ -191,7 +191,7 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
static inline unsigned int get_vflags(CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
unsigned int flags;
@ -208,7 +208,7 @@ static inline unsigned int get_vflags(CPUX86State *env)
support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
uint32_t int_addr, segoffs, ssp;
unsigned int sp;
@ -267,7 +267,7 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
void handle_vm86_fault(CPUX86State *env)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
@ -392,7 +392,7 @@ void handle_vm86_fault(CPUX86State *env)
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
TaskState *ts = cs->opaque;
struct target_vm86plus_struct * target_v86;
int ret;


@ -123,7 +123,7 @@ static void xtensa_underflow12(CPUXtensaState *env)
void cpu_loop(CPUXtensaState *env)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));
CPUState *cs = env_cpu(env);
target_siginfo_t info;
abi_ulong ret;
int trapnr;


@ -115,7 +115,7 @@ void cpu_exit(CPUState *cpu)
atomic_set(&cpu->exit_request, 1);
/* Ensure cpu_exec will see the exit request after TCG has exited. */
smp_wmb();
atomic_set(&cpu->icount_decr.u16.high, -1);
atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
@ -264,7 +264,7 @@ static void cpu_common_reset(CPUState *cpu)
cpu->mem_io_pc = 0;
cpu->mem_io_vaddr = 0;
cpu->icount_extra = 0;
atomic_set(&cpu->icount_decr.u32, 0);
atomic_set(&cpu->icount_decr_ptr->u32, 0);
cpu->can_do_io = 1;
cpu->exception_index = -1;
cpu->crash_occurred = false;


@ -25,7 +25,7 @@ def vcpu_transform_args(args, mode):
if mode == "code":
return Arguments([
# Does cast from helper requirements to tracing types
("CPUState *", "ENV_GET_CPU(%s)" % args.names()[0]),
("CPUState *", "env_cpu(%s)" % args.names()[0]),
])
else:
args = Arguments([

target/alpha/cpu-param.h (new file)

@ -0,0 +1,31 @@
/*
* Alpha cpu parameters for qemu.
*
* Copyright (c) 2007 Jocelyn Mayer
* SPDX-License-Identifier: LGPL-2.0+
*/
#ifndef ALPHA_CPU_PARAM_H
#define ALPHA_CPU_PARAM_H 1
#define TARGET_LONG_BITS 64
#define TARGET_PAGE_BITS 13
#ifdef CONFIG_USER_ONLY
/*
* ??? The kernel likes to give addresses in high memory. If the host has
* more virtual address space than the guest, this can lead to impossible
* allocations. Honor the long-standing assumption that only kernel addrs
* are negative, but otherwise allow allocations anywhere. This could lead
* to tricky emulation problems for programs doing tagged addressing, but
* that's far fewer than encounter the impossible allocation problem.
*/
#define TARGET_PHYS_ADDR_SPACE_BITS 63
#define TARGET_VIRT_ADDR_SPACE_BITS 63
#else
/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
#define TARGET_PHYS_ADDR_SPACE_BITS 44
#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
#endif
#define NB_MMU_MODES 3
#endif


@ -191,11 +191,10 @@ static void ev67_cpu_initfn(Object *obj)
static void alpha_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
AlphaCPU *cpu = ALPHA_CPU(obj);
CPUAlphaState *env = &cpu->env;
cs->env_ptr = env;
cpu_set_cpustate_pointers(cpu);
env->lock_addr = -1;
#if defined(CONFIG_USER_ONLY)


@ -22,37 +22,16 @@
#include "qemu-common.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#define TARGET_LONG_BITS 64
#define ALIGNED_ONLY
#define CPUArchState struct CPUAlphaState
/* Alpha processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)
#include "exec/cpu-defs.h"
#define ICACHE_LINE_SIZE 32
#define DCACHE_LINE_SIZE 32
#define TARGET_PAGE_BITS 13
#ifdef CONFIG_USER_ONLY
/* ??? The kernel likes to give addresses in high memory. If the host has
more virtual address space than the guest, this can lead to impossible
allocations. Honor the long-standing assumption that only kernel addrs
are negative, but otherwise allow allocations anywhere. This could lead
to tricky emulation problems for programs doing tagged addressing, but
that's far fewer than encounter the impossible allocation problem. */
#define TARGET_PHYS_ADDR_SPACE_BITS 63
#define TARGET_VIRT_ADDR_SPACE_BITS 63
#else
/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
#define TARGET_PHYS_ADDR_SPACE_BITS 44
#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
#endif
/* Alpha major type */
enum {
ALPHA_EV3 = 1,
@ -217,8 +196,6 @@ enum {
PALcode cheats and uses the KSEG mapping for its code+data rather than
physical addresses. */
#define NB_MMU_MODES 3
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_KERNEL_IDX 0
@ -274,9 +251,6 @@ struct CPUAlphaState {
/* This alarm doesn't exist in real hardware; we wish it did. */
uint64_t alarm_expire;
/* Those resources are used only in QEMU core */
CPU_COMMON
int error_code;
uint32_t features;
@ -295,20 +269,13 @@ struct AlphaCPU {
CPUState parent_obj;
/*< public >*/
CPUNegativeOffsetState neg;
CPUAlphaState env;
/* This alarm doesn't exist in real hardware; we wish it did. */
QEMUTimer *alarm_timer;
};
static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env)
{
return container_of(env, AlphaCPU, env);
}
#define ENV_GET_CPU(e) CPU(alpha_env_get_cpu(e))
#define ENV_OFFSET offsetof(AlphaCPU, env)
#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_alpha_cpu;
@ -327,6 +294,9 @@ void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
#define cpu_list alpha_cpu_list
#define cpu_signal_handler cpu_alpha_signal_handler
typedef CPUAlphaState CPUArchState;
typedef AlphaCPU ArchCPU;
#include "exec/cpu-all.h"
enum {


@ -136,7 +136,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
int prot_need, int mmu_idx,
target_ulong *pphys, int *pprot)
{
CPUState *cs = CPU(alpha_env_get_cpu(env));
CPUState *cs = env_cpu(env);
target_long saddr = addr;
target_ulong phys = 0;
target_ulong L1pte, L2pte, L3pte;
@ -486,8 +486,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
We expect that ENV->PC has already been updated. */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
cs->exception_index = excp;
env->error_code = error;
@ -498,8 +497,7 @@ void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
int excp, int error)
{
AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
cs->exception_index = excp;
env->error_code = error;


@ -44,17 +44,17 @@ uint64_t helper_load_pcc(CPUAlphaState *env)
#ifndef CONFIG_USER_ONLY
void helper_tbia(CPUAlphaState *env)
{
tlb_flush(CPU(alpha_env_get_cpu(env)));
tlb_flush(env_cpu(env));
}
void helper_tbis(CPUAlphaState *env, uint64_t p)
{
tlb_flush_page(CPU(alpha_env_get_cpu(env)), p);
tlb_flush_page(env_cpu(env), p);
}
void helper_tb_flush(CPUAlphaState *env)
{
tb_flush(CPU(alpha_env_get_cpu(env)));
tb_flush(env_cpu(env));
}
void helper_halt(uint64_t restart)
@ -78,7 +78,7 @@ uint64_t helper_get_walltime(void)
void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
{
AlphaCPU *cpu = alpha_env_get_cpu(env);
AlphaCPU *cpu = env_archcpu(env);
if (expire) {
env->alarm_expire = expire;


@ -257,8 +257,8 @@ static target_ulong arm_gdb_syscall(ARMCPU *cpu, gdb_syscall_complete_cb cb,
*/
target_ulong do_arm_semihosting(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
target_ulong args;
target_ulong arg0, arg1, arg2, arg3;
char * s;

target/arm/cpu-param.h (new file)

@ -0,0 +1,34 @@
/*
* ARM cpu parameters for qemu.
*
* Copyright (c) 2003 Fabrice Bellard
* SPDX-License-Identifier: LGPL-2.0+
*/
#ifndef ARM_CPU_PARAM_H
#define ARM_CPU_PARAM_H 1
#ifdef TARGET_AARCH64
# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 48
#else
# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_BITS 12
#else
/*
* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
* have to support 1K tiny pages.
*/
# define TARGET_PAGE_BITS_VARY
# define TARGET_PAGE_BITS_MIN 10
#endif
#define NB_MMU_MODES 8
#endif


@ -697,10 +697,9 @@ static void cpreg_hashtable_data_destroy(gpointer data)
static void arm_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
ARMCPU *cpu = ARM_CPU(obj);
cs->env_ptr = &cpu->env;
cpu_set_cpustate_pointers(cpu);
cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
g_free, cpreg_hashtable_data_destroy);


@ -22,23 +22,13 @@
#include "kvm-consts.h"
#include "hw/registerfields.h"
#if defined(TARGET_AARCH64)
/* AArch64 definitions */
# define TARGET_LONG_BITS 64
#else
# define TARGET_LONG_BITS 32
#endif
/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)
#define CPUArchState struct CPUARMState
#include "qemu-common.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
@ -114,7 +104,6 @@ enum {
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3
#define NB_MMU_MODES 8
/* ARM-specific extra insn start words:
* 1: Conditional execution bits
* 2: Partial exception syndrome for data aborts
@ -656,9 +645,7 @@ typedef struct CPUARMState {
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields after CPU_COMMON are preserved across CPU reset. */
/* Fields after this point are preserved across CPU reset. */
/* Internal CPU feature flags. */
uint64_t features;
@ -732,6 +719,7 @@ struct ARMCPU {
CPUState parent_obj;
/*< public >*/
CPUNegativeOffsetState neg;
CPUARMState env;
/* Coprocessor information */
@ -924,19 +912,10 @@ struct ARMCPU {
uint32_t sve_max_vq;
};
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
{
return container_of(env, ARMCPU, env);
}
void arm_cpu_post_init(Object *obj);
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);
#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))
#define ENV_OFFSET offsetof(ARMCPU, env)
#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_arm_cpu;
#endif
@ -2639,24 +2618,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define ARM_CPUID_TI915T 0x54029152
#define ARM_CPUID_TI925T 0x54029252
#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
* have to support 1K tiny pages.
*/
#define TARGET_PAGE_BITS_VARY
#define TARGET_PAGE_BITS_MIN 10
#endif
#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 48
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
@ -3154,6 +3115,9 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
}
}
typedef CPUARMState CPUArchState;
typedef ARMCPU ArchCPU;
#include "exec/cpu-all.h"
/* Bit usage in the TB flags field: bit 31 indicates whether we are


@ -43,7 +43,7 @@ static inline void unset_feature(CPUARMState *env, int feature)
#ifndef CONFIG_USER_ONLY
static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
/* Number of cores is in [25:24]; otherwise we RAZ */
return (cpu->core_count - 1) << 24;


@ -1005,7 +1005,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
}
qemu_mutex_lock_iothread();
arm_call_pre_el_change_hook(arm_env_get_cpu(env));
arm_call_pre_el_change_hook(env_archcpu(env));
qemu_mutex_unlock_iothread();
if (!return_to_aa64) {
@ -1047,7 +1047,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env));
arm_call_el_change_hook(env_archcpu(env));
qemu_mutex_unlock_iothread();
return;


@ -227,7 +227,7 @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
const ARMCPRegInfo *ri;
uint32_t key;
@ -548,7 +548,7 @@ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
raw_write(env, ri, value);
tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
@ -556,7 +556,7 @@ static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (raw_read(env, ri) != value) {
/* Unlike real hardware the qemu TLB uses virtual addresses,
@ -570,7 +570,7 @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
&& !extended_addresses_enabled(env)) {
@ -587,7 +587,7 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_all_cpus_synced(cs);
}
@ -595,7 +595,7 @@ static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_all_cpus_synced(cs);
}
@ -603,7 +603,7 @@ static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
@ -611,7 +611,7 @@ static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
@ -631,7 +631,7 @@ static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate all (TLBIALL) */
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (tlb_force_broadcast(env)) {
tlbiall_is_write(env, NULL, value);
@ -645,7 +645,7 @@ static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (tlb_force_broadcast(env)) {
tlbimva_is_write(env, NULL, value);
@ -659,7 +659,7 @@ static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (TLBIASID) */
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (tlb_force_broadcast(env)) {
tlbiasid_is_write(env, NULL, value);
@ -673,7 +673,7 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (tlb_force_broadcast(env)) {
tlbimvaa_is_write(env, NULL, value);
@ -686,7 +686,7 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_S12NSE1 |
@ -697,7 +697,7 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs,
ARMMMUIdxBit_S12NSE1 |
@ -714,7 +714,7 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
* translation information.
* This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
*/
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr;
if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@ -729,7 +729,7 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr;
if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@ -745,7 +745,7 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
@ -753,7 +753,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
@ -761,7 +761,7 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
@ -770,7 +770,7 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
@ -1353,7 +1353,7 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
static void pmu_update_irq(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
(env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
@ -1408,7 +1408,7 @@ static void pmccntr_op_finish(CPUARMState *env)
if (overflow_in > 0) {
int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
overflow_in;
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
}
#endif
@ -1457,7 +1457,7 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
if (overflow_in > 0) {
int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
overflow_in;
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
}
#endif
@ -1865,7 +1865,7 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
/* Begin with base v8.0 state. */
uint32_t valid_mask = 0x3fff;
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (arm_el_is_aa64(env, 3)) {
value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
@ -1902,7 +1902,7 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
/* Acquire the CSSELR index from the bank corresponding to the CCSIDR
* bank
@ -1921,7 +1921,7 @@ static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
uint64_t ret = 0;
@ -2452,7 +2452,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
int timeridx)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
timer_del(cpu->gt_timer[timeridx]);
}
@ -2473,7 +2473,7 @@ static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
trace_arm_gt_cval_write(timeridx, value);
env->cp15.c14_timer[timeridx].cval = value;
gt_recalc_timer(arm_env_get_cpu(env), timeridx);
gt_recalc_timer(env_archcpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
@ -2494,14 +2494,14 @@ static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
trace_arm_gt_tval_write(timeridx, value);
env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
sextract64(value, 0, 32);
gt_recalc_timer(arm_env_get_cpu(env), timeridx);
gt_recalc_timer(env_archcpu(env), timeridx);
}
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
int timeridx,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
trace_arm_gt_ctl_write(timeridx, value);
@ -2579,7 +2579,7 @@ static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
trace_arm_gt_cntvoff_write(value);
raw_write(env, ri, value);
@ -3212,7 +3212,7 @@ static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
if (!u32p) {
@ -3227,7 +3227,7 @@ static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint32_t nrgs = cpu->pmsav7_dregion;
if (value >= nrgs) {
@ -3355,7 +3355,7 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
TCR *tcr = raw_ptr(env, ri);
if (arm_feature(env, ARM_FEATURE_LPAE)) {
@ -3384,7 +3384,7 @@ static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
TCR *tcr = raw_ptr(env, ri);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
@ -3398,7 +3398,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* If the ASID changes (with a 64-bit write), we must flush the TLB. */
if (cpreg_field_is_64bit(ri) &&
extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
@ -3407,7 +3407,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
/* Accesses to VTTBR may change the VMID so we must flush the TLB. */
@ -3497,7 +3497,7 @@ static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Wait-for-interrupt (deprecated) */
cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
@ -3650,7 +3650,7 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
@ -3662,7 +3662,7 @@ static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t mpidr_read_val(CPUARMState *env)
{
ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
ARMCPU *cpu = env_archcpu(env);
uint64_t mpidr = cpu->mp_affinity;
if (arm_feature(env, ARM_FEATURE_V7MP)) {
@ -3773,7 +3773,7 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
bool sec = arm_is_secure_below_el3(env);
if (sec) {
@ -3790,7 +3790,7 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
if (tlb_force_broadcast(env)) {
tlbi_aa64_vmalle1is_write(env, NULL, value);
@ -3815,7 +3815,7 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
* stage 2 translations, whereas most other scopes only invalidate
* stage 1 translations.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
if (arm_is_secure_below_el3(env)) {
@ -3839,7 +3839,7 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
@ -3848,7 +3848,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
@ -3861,7 +3861,7 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
* stage 2 translations, whereas most other scopes only invalidate
* stage 1 translations.
*/
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
bool sec = arm_is_secure_below_el3(env);
bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
@ -3884,7 +3884,7 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
@ -3892,7 +3892,7 @@ static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
@ -3904,7 +3904,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
* Currently handles both VAE2 and VALE2, since we don't support
* flush-last-level-only.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
@ -3918,7 +3918,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
* Currently handles both VAE3 and VALE3, since we don't support
* flush-last-level-only.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
@ -3928,7 +3928,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
bool sec = arm_is_secure_below_el3(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
@ -3952,7 +3952,7 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
* since we don't support flush-for-specific-ASID-only or
* flush-last-level-only.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
@ -3975,7 +3975,7 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
@ -3985,7 +3985,7 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
@ -4001,7 +4001,7 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
* translation information.
* This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
uint64_t pageaddr;
@ -4017,7 +4017,7 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
CPUState *cs = env_cpu(env);
uint64_t pageaddr;
if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
@ -4044,7 +4044,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int dzp_bit = 1 << 4;
/* DZP indicates whether DC ZVA access is allowed */
@ -4079,7 +4079,7 @@ static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (raw_read(env, ri) == value) {
/* Skip the TLB flush if nothing actually changed; Linux likes
@ -4571,7 +4571,7 @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint64_t valid_mask = HCR_MASK;
if (arm_feature(env, ARM_FEATURE_EL3)) {
@ -5238,7 +5238,7 @@ int sve_exception_el(CPUARMState *env, int el)
*/
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint32_t zcr_len = cpu->sve_max_vq - 1;
if (el <= 1) {
@ -5406,7 +5406,7 @@ void hw_watchpoint_update_all(ARMCPU *cpu)
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int i = ri->crm;
/* Bits [63:49] are hardwired to the value of bit [48]; that is, the
@ -5422,7 +5422,7 @@ static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int i = ri->crm;
raw_write(env, ri, value);
@ -5524,7 +5524,7 @@ void hw_breakpoint_update_all(ARMCPU *cpu)
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int i = ri->crm;
raw_write(env, ri, value);
@ -5534,7 +5534,7 @@ static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int i = ri->crm;
/* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
@ -5630,7 +5630,7 @@ static void define_debug_regs(ARMCPU *cpu)
*/
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint64_t pfr1 = cpu->id_pfr1;
if (env->gicv3state) {
@ -5641,7 +5641,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint64_t pfr0 = cpu->isar.id_aa64pfr0;
if (env->gicv3state) {
@ -7421,14 +7421,14 @@ uint32_t HELPER(rbit)(uint32_t x)
/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
return 0;
@ -7488,7 +7488,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
static void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (mode != ARM_CPU_MODE_USR) {
cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
@ -7831,7 +7831,7 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
* PreserveFPState() pseudocode.
* We may throw an exception if the stacking fails.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
@ -10938,7 +10938,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
target_ulong *page_size,
ARMMMUFaultInfo *fi)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int level = 1;
uint32_t table;
uint32_t desc;
@ -11059,7 +11059,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, ARMMMUFaultInfo *fi)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int level = 1;
uint32_t table;
uint32_t desc;
@ -11444,7 +11444,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
/* Read an LPAE long-descriptor translation table. */
ARMFaultType fault_type = ARMFault_Translation;
@ -11802,7 +11802,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
target_ulong *page_size,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int n;
bool is_user = regime_is_user(env, mmu_idx);
@ -12006,7 +12006,7 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
* pseudocode SecurityCheck() function.
* We assume the caller has zero-initialized *sattrs.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int r;
bool idau_exempt = false, idau_ns = true, idau_nsc = true;
int idau_region = IREGION_NOTVALID;
@ -12119,7 +12119,7 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
* We set is_subpage to true if the region hit doesn't cover the
* entire TARGET_PAGE the address is within.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
bool is_user = regime_is_user(env, mmu_idx);
uint32_t secure = regime_is_secure(env, mmu_idx);
int n;
@ -12899,7 +12899,7 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
if (val < limit) {
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
cpu_restore_state(cs, GETPC(), true);
raise_exception(env, EXCP_STKOF, 0, 1);
@ -13180,7 +13180,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
* alignment faults or any memory attribute handling).
*/
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint64_t blocklen = 4 << cpu->dcz_blocksize;
uint64_t vaddr = vaddr_in & ~(blocklen - 1);
@ -13680,7 +13680,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
uint32_t flags = 0;
if (is_a64(env)) {
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint64_t sctlr;
*pc = env->pc;
@ -13853,7 +13853,7 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
uint64_t pmask;
assert(vq >= 1 && vq <= ARM_MAX_VQ);
assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
assert(vq <= env_archcpu(env)->sve_max_vq);
/* Zap the high bits of the zregs. */
for (i = 0; i < 32; i++) {
@ -13879,7 +13879,7 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
void aarch64_sve_change_el(CPUARMState *env, int old_el,
int new_el, bool el0_a64)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int old_len, new_len;
bool old_a64, new_a64;


@ -31,7 +31,7 @@
static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
/*
@ -224,7 +224,7 @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
* raising an exception if the limit is breached.
*/
if (newvalue < v7m_sp_limit(env)) {
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
/*
* Stack limit exceptions are a rare case, so rather than syncing
@ -427,7 +427,7 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
int target_el = check_wfx_trap(env, false);
if (cpu_has_work(cs)) {
@ -462,8 +462,7 @@ void HELPER(wfe)(CPUARMState *env)
void HELPER(yield)(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
/* This is a non-trappable hint instruction that generally indicates
* that the guest is currently busy-looping. Yield control back to the
@ -481,7 +480,7 @@ void HELPER(yield)(CPUARMState *env)
*/
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
CPUState *cs = env_cpu(env);
assert(excp_is_internal(excp));
cs->exception_index = excp;
@ -524,7 +523,7 @@ void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
qemu_mutex_lock_iothread();
arm_call_pre_el_change_hook(arm_env_get_cpu(env));
arm_call_pre_el_change_hook(env_archcpu(env));
qemu_mutex_unlock_iothread();
cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
@ -537,7 +536,7 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
env->regs[15] &= (env->thumb ? ~1 : ~3);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env));
arm_call_el_change_hook(env_archcpu(env));
qemu_mutex_unlock_iothread();
}
@ -842,7 +841,7 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
void HELPER(pre_hvc)(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int cur_el = arm_current_el(env);
/* FIXME: Use actual secure state. */
bool secure = false;
@ -882,7 +881,7 @@ void HELPER(pre_hvc)(CPUARMState *env)
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
bool smd_flag = env->cp15.scr_el3 & SCR_SMD;
@ -1156,7 +1155,7 @@ static bool check_breakpoints(ARMCPU *cpu)
void HELPER(check_breakpoints)(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
if (check_breakpoints(cpu)) {
HELPER(exception_internal(env, EXCP_DEBUG));


@ -14134,7 +14134,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
* table entry even for that case.
*/
return (tlb_hit(entry->addr_code, addr) &&
env->iotlb[mmu_idx][index].attrs.target_tlb_bit0);
env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0);
#endif
}
@ -14289,7 +14289,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cpu->env_ptr;
ARMCPU *arm_cpu = arm_env_get_cpu(env);
ARMCPU *arm_cpu = env_archcpu(env);
uint32_t tb_flags = dc->base.tb->flags;
int bound, core_mmu_idx;


@ -13408,7 +13408,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cs->env_ptr;
ARMCPU *cpu = arm_env_get_cpu(env);
ARMCPU *cpu = env_archcpu(env);
uint32_t tb_flags = dc->base.tb->flags;
uint32_t condexec, core_mmu_idx;

View File

@ -101,7 +101,7 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
/* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
val &= ~FPCR_FZ16;
}

target/cris/cpu-param.h (new file, 17 lines)
View File

@ -0,0 +1,17 @@
/*
* CRIS cpu parameters for qemu.
*
* Copyright (c) 2007 AXIS Communications AB
* SPDX-License-Identifier: LGPL-2.0+
*/
#ifndef CRIS_CPU_PARAM_H
#define CRIS_CPU_PARAM_H 1
#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#define NB_MMU_MODES 2
#endif
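The new cpu-param.h headers gather the compile-time constants that exec/cpu-defs.h must see before it can size the TLB structures now shared through CPUNegativeOffsetState. A sketch of the consuming side, assuming the usual derived macros (the exact placement of each macro in QEMU's headers may differ):

/* Sketch of exec/cpu-defs.h: per-target parameters come in first... */
#include "cpu-param.h"

#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif

/* ...then common constants are derived from them. */
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)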

View File

@ -172,12 +172,11 @@ static void cris_disas_set_info(CPUState *cpu, disassemble_info *info)
static void cris_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
CRISCPU *cpu = CRIS_CPU(obj);
CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
CPUCRISState *env = &cpu->env;
cs->env_ptr = env;
cpu_set_cpustate_pointers(cpu);
env->pregs[PR_VR] = ccc->vr;

View File

@ -23,11 +23,6 @@
#include "qemu-common.h"
#include "cpu-qom.h"
#define TARGET_LONG_BITS 32
#define CPUArchState struct CPUCRISState
#include "exec/cpu-defs.h"
#define EXCP_NMI 1
@ -105,8 +100,6 @@
#define CC_A 14
#define CC_P 15
#define NB_MMU_MODES 2
typedef struct {
uint32_t hi;
uint32_t lo;
@ -170,8 +163,6 @@ typedef struct CPUCRISState {
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Members from load_info on are preserved across resets. */
void *load_info;
} CPUCRISState;
@ -187,17 +178,10 @@ struct CRISCPU {
CPUState parent_obj;
/*< public >*/
CPUNegativeOffsetState neg;
CPUCRISState env;
};
static inline CRISCPU *cris_env_get_cpu(CPUCRISState *env)
{
return container_of(env, CRISCPU, env);
}
#define ENV_GET_CPU(e) CPU(cris_env_get_cpu(e))
#define ENV_OFFSET offsetof(CRISCPU, env)
#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_cris_cpu;
@ -260,12 +244,8 @@ enum {
};
/* CRIS uses 8k pages. */
#define TARGET_PAGE_BITS 13
#define MMAP_SHIFT TARGET_PAGE_BITS
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU
#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_CRIS_CPU
@ -295,6 +275,9 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
#define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5
#define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6
typedef CPUCRISState CPUArchState;
typedef CRISCPU ArchCPU;
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,

View File

@ -33,96 +33,99 @@
void cris_mmu_init(CPUCRISState *env)
{
env->mmu_rand_lfsr = 0xcccc;
env->mmu_rand_lfsr = 0xcccc;
}
#define SR_POLYNOM 0x8805
static inline unsigned int compute_polynom(unsigned int sr)
{
unsigned int i;
unsigned int f;
unsigned int i;
unsigned int f;
f = 0;
for (i = 0; i < 16; i++)
f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
f = 0;
for (i = 0; i < 16; i++) {
f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
}
return f;
return f;
}
static void cris_mmu_update_rand_lfsr(CPUCRISState *env)
{
unsigned int f;
unsigned int f;
/* Update lfsr at every fault. */
f = compute_polynom(env->mmu_rand_lfsr);
env->mmu_rand_lfsr >>= 1;
env->mmu_rand_lfsr |= (f << 15);
env->mmu_rand_lfsr &= 0xffff;
/* Update lfsr at every fault. */
f = compute_polynom(env->mmu_rand_lfsr);
env->mmu_rand_lfsr >>= 1;
env->mmu_rand_lfsr |= (f << 15);
env->mmu_rand_lfsr &= 0xffff;
}
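The randomized TLB refill set comes from a 16-bit Fibonacci LFSR over the polynomial 0x8805, stepped once per MMU fault; because the register is masked back to 16 bits, only the parity of the tapped bits survives the |= (f << 15). A standalone sketch for exercising the sequence:

#include <stdint.h>
#include <stdio.h>

#define SR_POLYNOM 0x8805

/* One LFSR step: the feedback bit is the parity of the tapped bits. */
static uint16_t lfsr_step(uint16_t sr)
{
    unsigned f = 0;

    for (int i = 0; i < 16; i++) {
        f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
    }
    return (sr >> 1) | ((f & 1) << 15);
}

int main(void)
{
    uint16_t sr = 0xcccc;   /* reset value from cris_mmu_init() */

    for (int i = 0; i < 4; i++) {
        sr = lfsr_step(sr);
        printf("lfsr=%04x -> refill set %d\n", sr, sr & 3);
    }
    return 0;
}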
static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
{
return (rw_gc_cfg & 12) != 0;
return (rw_gc_cfg & 12) != 0;
}
static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
{
return (1 << seg) & rw_mm_cfg;
return (1 << seg) & rw_mm_cfg;
}
static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg)
{
uint32_t base;
int i;
uint32_t base;
int i;
if (seg < 8)
base = env->sregs[SFR_RW_MM_KBASE_LO];
else
base = env->sregs[SFR_RW_MM_KBASE_HI];
if (seg < 8) {
base = env->sregs[SFR_RW_MM_KBASE_LO];
} else {
base = env->sregs[SFR_RW_MM_KBASE_HI];
}
i = seg & 7;
base >>= i * 4;
base &= 15;
i = seg & 7;
base >>= i * 4;
base &= 15;
base <<= 28;
return base;
base <<= 28;
return base;
}
/* Used by the tlb decoder. */
#define EXTRACT_FIELD(src, start, end) \
(((src) >> start) & ((1 << (end - start + 1)) - 1))
static inline void set_field(uint32_t *dst, unsigned int val,
/* Used by the tlb decoder. */
#define EXTRACT_FIELD(src, start, end) \
(((src) >> start) & ((1 << (end - start + 1)) - 1))
static inline void set_field(uint32_t *dst, unsigned int val,
unsigned int offset, unsigned int width)
{
uint32_t mask;
uint32_t mask;
mask = (1 << width) - 1;
mask <<= offset;
val <<= offset;
mask = (1 << width) - 1;
mask <<= offset;
val <<= offset;
val &= mask;
*dst &= ~(mask);
*dst |= val;
val &= mask;
*dst &= ~(mask);
*dst |= val;
}
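EXTRACT_FIELD and set_field are inclusive bit-range helpers used throughout the TLB decoder: the former reads bits start..end, the latter writes width bits at offset. A small self-contained round-trip check (a sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))

static void set_field(uint32_t *dst, unsigned int val,
                      unsigned int offset, unsigned int width)
{
    uint32_t mask = ((1 << width) - 1) << offset;

    *dst = (*dst & ~mask) | ((val << offset) & mask);
}

int main(void)
{
    uint32_t hi = 0;

    set_field(&hi, 0x1234, 13, 19);              /* write the vpn field */
    assert(EXTRACT_FIELD(hi, 13, 31) == 0x1234); /* read it back */
    return 0;
}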
#ifdef DEBUG
static void dump_tlb(CPUCRISState *env, int mmu)
{
int set;
int idx;
uint32_t hi, lo, tlb_vpn, tlb_pfn;
int set;
int idx;
uint32_t hi, lo, tlb_vpn, tlb_pfn;
for (set = 0; set < 4; set++) {
for (idx = 0; idx < 16; idx++) {
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
for (set = 0; set < 4; set++) {
for (idx = 0; idx < 16; idx++) {
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
set, idx, hi, lo, tlb_vpn, tlb_pfn);
}
}
printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
set, idx, hi, lo, tlb_vpn, tlb_pfn);
}
}
}
#endif
@ -131,232 +134,223 @@ static int cris_mmu_translate_page(struct cris_mmu_result *res,
CPUCRISState *env, uint32_t vaddr,
int rw, int usermode, int debug)
{
unsigned int vpage;
unsigned int idx;
uint32_t pid, lo, hi;
uint32_t tlb_vpn, tlb_pfn = 0;
int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
int cfg_v, cfg_k, cfg_w, cfg_x;
int set, match = 0;
uint32_t r_cause;
uint32_t r_cfg;
int rwcause;
int mmu = 1; /* Data mmu is default. */
int vect_base;
unsigned int vpage;
unsigned int idx;
uint32_t pid, lo, hi;
uint32_t tlb_vpn, tlb_pfn = 0;
int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
int cfg_v, cfg_k, cfg_w, cfg_x;
int set, match = 0;
uint32_t r_cause;
uint32_t r_cfg;
int rwcause;
int mmu = 1; /* Data mmu is default. */
int vect_base;
r_cause = env->sregs[SFR_R_MM_CAUSE];
r_cfg = env->sregs[SFR_RW_MM_CFG];
pid = env->pregs[PR_PID] & 0xff;
r_cause = env->sregs[SFR_R_MM_CAUSE];
r_cfg = env->sregs[SFR_RW_MM_CFG];
pid = env->pregs[PR_PID] & 0xff;
switch (rw) {
case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break;
case 1: rwcause = CRIS_MMU_ERR_WRITE; break;
default:
case 0: rwcause = CRIS_MMU_ERR_READ; break;
}
switch (rw) {
case 2:
rwcause = CRIS_MMU_ERR_EXEC;
mmu = 0;
break;
case 1:
rwcause = CRIS_MMU_ERR_WRITE;
break;
default:
case 0:
rwcause = CRIS_MMU_ERR_READ;
break;
}
/* I exception vectors 4 - 7, D 8 - 11. */
vect_base = (mmu + 1) * 4;
/* I exception vectors 4 - 7, D 8 - 11. */
vect_base = (mmu + 1) * 4;
vpage = vaddr >> 13;
vpage = vaddr >> 13;
/* We know the index which to check on each set.
Scan both I and D. */
#if 0
for (set = 0; set < 4; set++) {
for (idx = 0; idx < 16; idx++) {
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
/*
* We know the index which to check on each set.
* Scan both I and D.
*/
idx = vpage & 15;
for (set = 0; set < 4; set++) {
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
set, idx, hi, lo, tlb_vpn, tlb_pfn);
}
}
#endif
tlb_vpn = hi >> 13;
tlb_pid = EXTRACT_FIELD(hi, 0, 7);
tlb_g = EXTRACT_FIELD(lo, 4, 4);
idx = vpage & 15;
for (set = 0; set < 4; set++)
{
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
mmu, set, idx, tlb_vpn, vpage, lo, hi);
if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) {
match = 1;
break;
}
}
tlb_vpn = hi >> 13;
tlb_pid = EXTRACT_FIELD(hi, 0, 7);
tlb_g = EXTRACT_FIELD(lo, 4, 4);
res->bf_vec = vect_base;
if (match) {
cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);
D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
mmu, set, idx, tlb_vpn, vpage, lo, hi);
if ((tlb_g || (tlb_pid == pid))
&& tlb_vpn == vpage) {
match = 1;
break;
}
}
tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
tlb_v = EXTRACT_FIELD(lo, 3, 3);
tlb_k = EXTRACT_FIELD(lo, 2, 2);
tlb_w = EXTRACT_FIELD(lo, 1, 1);
tlb_x = EXTRACT_FIELD(lo, 0, 0);
res->bf_vec = vect_base;
if (match) {
cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);
/*
* set_exception_vector(0x04, i_mmu_refill);
* set_exception_vector(0x05, i_mmu_invalid);
* set_exception_vector(0x06, i_mmu_access);
* set_exception_vector(0x07, i_mmu_execute);
* set_exception_vector(0x08, d_mmu_refill);
* set_exception_vector(0x09, d_mmu_invalid);
* set_exception_vector(0x0a, d_mmu_access);
* set_exception_vector(0x0b, d_mmu_write);
*/
if (cfg_k && tlb_k && usermode) {
D(printf("tlb: kernel protected %x lo=%x pc=%x\n",
vaddr, lo, env->pc));
match = 0;
res->bf_vec = vect_base + 2;
} else if (rw == 1 && cfg_w && !tlb_w) {
D(printf("tlb: write protected %x lo=%x pc=%x\n",
vaddr, lo, env->pc));
match = 0;
/* write accesses never go through the I mmu. */
res->bf_vec = vect_base + 3;
} else if (rw == 2 && cfg_x && !tlb_x) {
D(printf("tlb: exec protected %x lo=%x pc=%x\n",
vaddr, lo, env->pc));
match = 0;
res->bf_vec = vect_base + 3;
} else if (cfg_v && !tlb_v) {
D(printf("tlb: invalid %x\n", vaddr));
match = 0;
res->bf_vec = vect_base + 1;
}
tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
tlb_v = EXTRACT_FIELD(lo, 3, 3);
tlb_k = EXTRACT_FIELD(lo, 2, 2);
tlb_w = EXTRACT_FIELD(lo, 1, 1);
tlb_x = EXTRACT_FIELD(lo, 0, 0);
res->prot = 0;
if (match) {
res->prot |= PAGE_READ;
if (tlb_w) {
res->prot |= PAGE_WRITE;
}
if (mmu == 0 && (cfg_x || tlb_x)) {
res->prot |= PAGE_EXEC;
}
} else {
D(dump_tlb(env, mmu));
}
} else {
/* If refill, provide a randomized set. */
set = env->mmu_rand_lfsr & 3;
}
/*
set_exception_vector(0x04, i_mmu_refill);
set_exception_vector(0x05, i_mmu_invalid);
set_exception_vector(0x06, i_mmu_access);
set_exception_vector(0x07, i_mmu_execute);
set_exception_vector(0x08, d_mmu_refill);
set_exception_vector(0x09, d_mmu_invalid);
set_exception_vector(0x0a, d_mmu_access);
set_exception_vector(0x0b, d_mmu_write);
*/
if (cfg_k && tlb_k && usermode) {
D(printf ("tlb: kernel protected %x lo=%x pc=%x\n",
vaddr, lo, env->pc));
match = 0;
res->bf_vec = vect_base + 2;
} else if (rw == 1 && cfg_w && !tlb_w) {
D(printf ("tlb: write protected %x lo=%x pc=%x\n",
vaddr, lo, env->pc));
match = 0;
/* write accesses never go through the I mmu. */
res->bf_vec = vect_base + 3;
} else if (rw == 2 && cfg_x && !tlb_x) {
D(printf ("tlb: exec protected %x lo=%x pc=%x\n",
vaddr, lo, env->pc));
match = 0;
res->bf_vec = vect_base + 3;
} else if (cfg_v && !tlb_v) {
D(printf ("tlb: invalid %x\n", vaddr));
match = 0;
res->bf_vec = vect_base + 1;
}
if (!match && !debug) {
cris_mmu_update_rand_lfsr(env);
res->prot = 0;
if (match) {
res->prot |= PAGE_READ;
if (tlb_w)
res->prot |= PAGE_WRITE;
if (mmu == 0 && (cfg_x || tlb_x))
res->prot |= PAGE_EXEC;
}
else
D(dump_tlb(env, mmu));
} else {
/* If refill, provide a randomized set. */
set = env->mmu_rand_lfsr & 3;
}
/* Compute index. */
idx = vpage & 15;
if (!match && !debug) {
cris_mmu_update_rand_lfsr(env);
/* Update RW_MM_TLB_SEL. */
env->sregs[SFR_RW_MM_TLB_SEL] = 0;
set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);
/* Compute index. */
idx = vpage & 15;
/* Update RW_MM_CAUSE. */
set_field(&r_cause, rwcause, 8, 2);
set_field(&r_cause, vpage, 13, 19);
set_field(&r_cause, pid, 0, 8);
env->sregs[SFR_R_MM_CAUSE] = r_cause;
D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
}
/* Update RW_MM_TLB_SEL. */
env->sregs[SFR_RW_MM_TLB_SEL] = 0;
set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);
D(printf("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
" %x cause=%x sel=%x sp=%x %x %x\n",
__func__, rw, match, env->pc,
vaddr, vpage,
tlb_vpn, tlb_pfn, tlb_pid,
pid,
r_cause,
env->sregs[SFR_RW_MM_TLB_SEL],
env->regs[R_SP], env->pregs[PR_USP], env->ksp));
/* Update RW_MM_CAUSE. */
set_field(&r_cause, rwcause, 8, 2);
set_field(&r_cause, vpage, 13, 19);
set_field(&r_cause, pid, 0, 8);
env->sregs[SFR_R_MM_CAUSE] = r_cause;
D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
}
D(printf ("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
" %x cause=%x sel=%x sp=%x %x %x\n",
__func__, rw, match, env->pc,
vaddr, vpage,
tlb_vpn, tlb_pfn, tlb_pid,
pid,
r_cause,
env->sregs[SFR_RW_MM_TLB_SEL],
env->regs[R_SP], env->pregs[PR_USP], env->ksp));
res->phy = tlb_pfn << TARGET_PAGE_BITS;
return !match;
res->phy = tlb_pfn << TARGET_PAGE_BITS;
return !match;
}
void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
{
CRISCPU *cpu = cris_env_get_cpu(env);
target_ulong vaddr;
unsigned int idx;
uint32_t lo, hi;
uint32_t tlb_vpn;
int tlb_pid, tlb_g, tlb_v;
unsigned int set;
unsigned int mmu;
target_ulong vaddr;
unsigned int idx;
uint32_t lo, hi;
uint32_t tlb_vpn;
int tlb_pid, tlb_g, tlb_v;
unsigned int set;
unsigned int mmu;
pid &= 0xff;
for (mmu = 0; mmu < 2; mmu++) {
for (set = 0; set < 4; set++)
{
for (idx = 0; idx < 16; idx++) {
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
tlb_pid = EXTRACT_FIELD(hi, 0, 7);
tlb_g = EXTRACT_FIELD(lo, 4, 4);
tlb_v = EXTRACT_FIELD(lo, 3, 3);
pid &= 0xff;
for (mmu = 0; mmu < 2; mmu++) {
for (set = 0; set < 4; set++) {
for (idx = 0; idx < 16; idx++) {
lo = env->tlbsets[mmu][set][idx].lo;
hi = env->tlbsets[mmu][set][idx].hi;
if (tlb_v && !tlb_g && (tlb_pid == pid)) {
vaddr = tlb_vpn << TARGET_PAGE_BITS;
D_LOG("flush pid=%x vaddr=%x\n",
pid, vaddr);
tlb_flush_page(CPU(cpu), vaddr);
}
}
}
}
tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
tlb_pid = EXTRACT_FIELD(hi, 0, 7);
tlb_g = EXTRACT_FIELD(lo, 4, 4);
tlb_v = EXTRACT_FIELD(lo, 3, 3);
if (tlb_v && !tlb_g && (tlb_pid == pid)) {
vaddr = tlb_vpn << TARGET_PAGE_BITS;
D_LOG("flush pid=%x vaddr=%x\n", pid, vaddr);
tlb_flush_page(env_cpu(env), vaddr);
}
}
}
}
}
int cris_mmu_translate(struct cris_mmu_result *res,
CPUCRISState *env, uint32_t vaddr,
int rw, int mmu_idx, int debug)
{
int seg;
int miss = 0;
int is_user = mmu_idx == MMU_USER_IDX;
uint32_t old_srs;
int seg;
int miss = 0;
int is_user = mmu_idx == MMU_USER_IDX;
uint32_t old_srs;
old_srs= env->pregs[PR_SRS];
old_srs = env->pregs[PR_SRS];
/* rw == 2 means exec, map the access to the insn mmu. */
env->pregs[PR_SRS] = rw == 2 ? 1 : 2;
/* rw == 2 means exec, map the access to the insn mmu. */
env->pregs[PR_SRS] = rw == 2 ? 1 : 2;
if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
res->phy = vaddr;
res->prot = PAGE_BITS;
goto done;
}
if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
res->phy = vaddr;
res->prot = PAGE_BITS;
goto done;
}
seg = vaddr >> 28;
if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG]))
{
uint32_t base;
seg = vaddr >> 28;
if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) {
uint32_t base;
miss = 0;
base = cris_mmu_translate_seg(env, seg);
res->phy = base | (0x0fffffff & vaddr);
res->prot = PAGE_BITS;
} else {
miss = cris_mmu_translate_page(res, env, vaddr, rw,
is_user, debug);
}
done:
env->pregs[PR_SRS] = old_srs;
return miss;
miss = 0;
base = cris_mmu_translate_seg(env, seg);
res->phy = base | (0x0fffffff & vaddr);
res->prot = PAGE_BITS;
} else {
miss = cris_mmu_translate_page(res, env, vaddr, rw,
is_user, debug);
}
done:
env->pregs[PR_SRS] = old_srs;
return miss;
}

View File

@ -39,7 +39,7 @@
void helper_raise_exception(CPUCRISState *env, uint32_t index)
{
CPUState *cs = CPU(cris_env_get_cpu(env));
CPUState *cs = env_cpu(env);
cs->exception_index = index;
cpu_loop_exit(cs);
@ -48,17 +48,17 @@ void helper_raise_exception(CPUCRISState *env, uint32_t index)
void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
{
#if !defined(CONFIG_USER_ONLY)
pid &= 0xff;
if (pid != (env->pregs[PR_PID] & 0xff))
cris_mmu_flush_pid(env, env->pregs[PR_PID]);
pid &= 0xff;
if (pid != (env->pregs[PR_PID] & 0xff)) {
cris_mmu_flush_pid(env, env->pregs[PR_PID]);
}
#endif
}
void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
{
#if !defined(CONFIG_USER_ONLY)
CRISCPU *cpu = cris_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
tlb_flush_page(cs, env->pregs[PR_SPC]);
tlb_flush_page(cs, new_spc);
@ -66,541 +66,516 @@ void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
}
/* Used by the tlb decoder. */
#define EXTRACT_FIELD(src, start, end) \
(((src) >> start) & ((1 << (end - start + 1)) - 1))
#define EXTRACT_FIELD(src, start, end) \
(((src) >> start) & ((1 << (end - start + 1)) - 1))
void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
{
#if !defined(CONFIG_USER_ONLY)
CRISCPU *cpu = cris_env_get_cpu(env);
#endif
uint32_t srs;
srs = env->pregs[PR_SRS];
srs &= 3;
env->sregs[srs][sreg] = env->regs[reg];
uint32_t srs;
srs = env->pregs[PR_SRS];
srs &= 3;
env->sregs[srs][sreg] = env->regs[reg];
#if !defined(CONFIG_USER_ONLY)
if (srs == 1 || srs == 2) {
if (sreg == 6) {
/* Writes to tlb-hi write to mm_cause as a side
effect. */
env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg];
env->sregs[SFR_R_MM_CAUSE] = env->regs[reg];
}
else if (sreg == 5) {
uint32_t set;
uint32_t idx;
uint32_t lo, hi;
uint32_t vaddr;
int tlb_v;
if (srs == 1 || srs == 2) {
if (sreg == 6) {
/* Writes to tlb-hi write to mm_cause as a side effect. */
env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg];
env->sregs[SFR_R_MM_CAUSE] = env->regs[reg];
} else if (sreg == 5) {
uint32_t set;
uint32_t idx;
uint32_t lo, hi;
uint32_t vaddr;
int tlb_v;
idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
set >>= 4;
set &= 3;
idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
set >>= 4;
set &= 3;
idx &= 15;
/* We've just made a write to tlb_lo. */
lo = env->sregs[SFR_RW_MM_TLB_LO];
/* Writes are done via r_mm_cause. */
hi = env->sregs[SFR_R_MM_CAUSE];
idx &= 15;
/* We've just made a write to tlb_lo. */
lo = env->sregs[SFR_RW_MM_TLB_LO];
/* Writes are done via r_mm_cause. */
hi = env->sregs[SFR_R_MM_CAUSE];
vaddr = EXTRACT_FIELD(env->tlbsets[srs-1][set][idx].hi,
13, 31);
vaddr <<= TARGET_PAGE_BITS;
tlb_v = EXTRACT_FIELD(env->tlbsets[srs-1][set][idx].lo,
3, 3);
env->tlbsets[srs - 1][set][idx].lo = lo;
env->tlbsets[srs - 1][set][idx].hi = hi;
vaddr = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].hi, 13, 31);
vaddr <<= TARGET_PAGE_BITS;
tlb_v = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].lo, 3, 3);
env->tlbsets[srs - 1][set][idx].lo = lo;
env->tlbsets[srs - 1][set][idx].hi = hi;
D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
vaddr, tlb_v, env->pc);
if (tlb_v) {
tlb_flush_page(CPU(cpu), vaddr);
}
}
}
D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
vaddr, tlb_v, env->pc);
if (tlb_v) {
tlb_flush_page(env_cpu(env), vaddr);
}
}
}
#endif
}
void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg)
{
uint32_t srs;
env->pregs[PR_SRS] &= 3;
srs = env->pregs[PR_SRS];
uint32_t srs;
env->pregs[PR_SRS] &= 3;
srs = env->pregs[PR_SRS];
#if !defined(CONFIG_USER_ONLY)
if (srs == 1 || srs == 2)
{
uint32_t set;
uint32_t idx;
uint32_t lo, hi;
if (srs == 1 || srs == 2) {
uint32_t set;
uint32_t idx;
uint32_t lo, hi;
idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
set >>= 4;
set &= 3;
idx &= 15;
idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
set >>= 4;
set &= 3;
idx &= 15;
/* Update the mirror regs. */
hi = env->tlbsets[srs - 1][set][idx].hi;
lo = env->tlbsets[srs - 1][set][idx].lo;
env->sregs[SFR_RW_MM_TLB_HI] = hi;
env->sregs[SFR_RW_MM_TLB_LO] = lo;
}
/* Update the mirror regs. */
hi = env->tlbsets[srs - 1][set][idx].hi;
lo = env->tlbsets[srs - 1][set][idx].lo;
env->sregs[SFR_RW_MM_TLB_HI] = hi;
env->sregs[SFR_RW_MM_TLB_LO] = lo;
}
#endif
env->regs[reg] = env->sregs[srs][sreg];
env->regs[reg] = env->sregs[srs][sreg];
}
static void cris_ccs_rshift(CPUCRISState *env)
{
uint32_t ccs;
uint32_t ccs;
/* Apply the ccs shift. */
ccs = env->pregs[PR_CCS];
ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10);
if (ccs & U_FLAG)
{
/* Enter user mode. */
env->ksp = env->regs[R_SP];
env->regs[R_SP] = env->pregs[PR_USP];
}
/* Apply the ccs shift. */
ccs = env->pregs[PR_CCS];
ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10);
if (ccs & U_FLAG) {
/* Enter user mode. */
env->ksp = env->regs[R_SP];
env->regs[R_SP] = env->pregs[PR_USP];
}
env->pregs[PR_CCS] = ccs;
env->pregs[PR_CCS] = ccs;
}
void helper_rfe(CPUCRISState *env)
{
int rflag = env->pregs[PR_CCS] & R_FLAG;
int rflag = env->pregs[PR_CCS] & R_FLAG;
D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n",
env->pregs[PR_ERP], env->pregs[PR_PID],
env->pregs[PR_CCS],
env->btarget);
D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n",
env->pregs[PR_ERP], env->pregs[PR_PID],
env->pregs[PR_CCS],
env->btarget);
cris_ccs_rshift(env);
cris_ccs_rshift(env);
/* RFE sets the P_FLAG only if the R_FLAG is not set. */
if (!rflag)
env->pregs[PR_CCS] |= P_FLAG;
/* RFE sets the P_FLAG only if the R_FLAG is not set. */
if (!rflag) {
env->pregs[PR_CCS] |= P_FLAG;
}
}
void helper_rfn(CPUCRISState *env)
{
int rflag = env->pregs[PR_CCS] & R_FLAG;
int rflag = env->pregs[PR_CCS] & R_FLAG;
D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n",
env->pregs[PR_ERP], env->pregs[PR_PID],
env->pregs[PR_CCS],
env->btarget);
D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n",
env->pregs[PR_ERP], env->pregs[PR_PID],
env->pregs[PR_CCS],
env->btarget);
cris_ccs_rshift(env);
cris_ccs_rshift(env);
/* Set the P_FLAG only if the R_FLAG is not set. */
if (!rflag)
env->pregs[PR_CCS] |= P_FLAG;
/* Set the P_FLAG only if the R_FLAG is not set. */
if (!rflag) {
env->pregs[PR_CCS] |= P_FLAG;
}
/* Always set the M flag. */
env->pregs[PR_CCS] |= M_FLAG_V32;
/* Always set the M flag. */
env->pregs[PR_CCS] |= M_FLAG_V32;
}
uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs)
{
/* FIXME: clean this up. */
/* FIXME: clean this up. */
/* des ref:
The N flag is set according to the selected bit in the dest reg.
The Z flag is set if the selected bit and all bits to the right are
zero.
The X flag is cleared.
Other flags are left untouched.
The destination reg is not affected.*/
unsigned int fz, sbit, bset, mask, masked_t0;
/*
* des ref:
* The N flag is set according to the selected bit in the dest reg.
* The Z flag is set if the selected bit and all bits to the right are
* zero.
* The X flag is cleared.
* Other flags are left untouched.
* The destination reg is not affected.
*/
unsigned int fz, sbit, bset, mask, masked_t0;
sbit = t1 & 31;
bset = !!(t0 & (1 << sbit));
mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1;
masked_t0 = t0 & mask;
fz = !(masked_t0 | bset);
sbit = t1 & 31;
bset = !!(t0 & (1 << sbit));
mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1;
masked_t0 = t0 & mask;
fz = !(masked_t0 | bset);
/* Clear the X, N and Z flags. */
ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG);
if (env->pregs[PR_VR] < 32)
ccs &= ~(V_FLAG | C_FLAG);
/* Set the N and Z flags accordingly. */
ccs |= (bset << 3) | (fz << 2);
return ccs;
/* Clear the X, N and Z flags. */
ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG);
if (env->pregs[PR_VR] < 32) {
ccs &= ~(V_FLAG | C_FLAG);
}
/* Set the N and Z flags accordingly. */
ccs |= (bset << 3) | (fz << 2);
return ccs;
}
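As the rewritten comment states, btst derives N from the selected bit and Z from the selected bit together with everything to its right. A tiny worked example of just that core, independent of the CCS plumbing (a sketch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t t0 = 0x00000100;            /* destination register */
    uint32_t t1 = 8;                     /* bit select */
    unsigned sbit = t1 & 31;
    unsigned bset = !!(t0 & (1u << sbit));        /* N flag source */
    uint32_t mask = sbit == 31 ? ~0u : (1u << (sbit + 1)) - 1;
    unsigned fz = !((t0 & mask) | bset);          /* Z flag source */

    assert(bset == 1 && fz == 0);        /* bit 8 set -> N=1, Z=0 */

    t0 = 0x00000200;                     /* bit 9 set, bits 0..8 clear */
    bset = !!(t0 & (1u << sbit));
    fz = !((t0 & mask) | bset);
    assert(bset == 0 && fz == 1);        /* selected bit and all to its
                                            right are zero -> Z=1 */
    return 0;
}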
static inline uint32_t evaluate_flags_writeback(CPUCRISState *env,
uint32_t flags, uint32_t ccs)
{
unsigned int x, z, mask;
unsigned int x, z, mask;
/* Extended arithmetics, leave the z flag alone. */
x = env->cc_x;
mask = env->cc_mask | X_FLAG;
if (x) {
z = flags & Z_FLAG;
mask = mask & ~z;
}
flags &= mask;
/* Extended arithmetics, leave the z flag alone. */
x = env->cc_x;
mask = env->cc_mask | X_FLAG;
if (x) {
z = flags & Z_FLAG;
mask = mask & ~z;
}
flags &= mask;
/* all insn clear the x-flag except setf or clrf. */
ccs &= ~mask;
ccs |= flags;
return ccs;
/* all insn clear the x-flag except setf or clrf. */
ccs &= ~mask;
ccs |= flags;
return ccs;
}
uint32_t helper_evaluate_flags_muls(CPUCRISState *env,
uint32_t ccs, uint32_t res, uint32_t mof)
{
uint32_t flags = 0;
int64_t tmp;
int dneg;
uint32_t flags = 0;
int64_t tmp;
int dneg;
dneg = ((int32_t)res) < 0;
dneg = ((int32_t)res) < 0;
tmp = mof;
tmp <<= 32;
tmp |= res;
if (tmp == 0)
flags |= Z_FLAG;
else if (tmp < 0)
flags |= N_FLAG;
if ((dneg && mof != -1)
|| (!dneg && mof != 0))
flags |= V_FLAG;
return evaluate_flags_writeback(env, flags, ccs);
tmp = mof;
tmp <<= 32;
tmp |= res;
if (tmp == 0) {
flags |= Z_FLAG;
} else if (tmp < 0) {
flags |= N_FLAG;
}
if ((dneg && mof != -1) || (!dneg && mof != 0)) {
flags |= V_FLAG;
}
return evaluate_flags_writeback(env, flags, ccs);
}
uint32_t helper_evaluate_flags_mulu(CPUCRISState *env,
uint32_t ccs, uint32_t res, uint32_t mof)
{
uint32_t flags = 0;
uint64_t tmp;
uint32_t flags = 0;
uint64_t tmp;
tmp = mof;
tmp <<= 32;
tmp |= res;
if (tmp == 0)
flags |= Z_FLAG;
else if (tmp >> 63)
flags |= N_FLAG;
if (mof)
flags |= V_FLAG;
tmp = mof;
tmp <<= 32;
tmp |= res;
if (tmp == 0) {
flags |= Z_FLAG;
} else if (tmp >> 63) {
flags |= N_FLAG;
}
if (mof) {
flags |= V_FLAG;
}
return evaluate_flags_writeback(env, flags, ccs);
return evaluate_flags_writeback(env, flags, ccs);
}
uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs,
uint32_t src, uint32_t dst, uint32_t res)
{
uint32_t flags = 0;
uint32_t flags = 0;
src = src & 0x80000000;
dst = dst & 0x80000000;
src = src & 0x80000000;
dst = dst & 0x80000000;
if ((res & 0x80000000L) != 0L)
{
flags |= N_FLAG;
if (!src && !dst)
flags |= V_FLAG;
else if (src & dst)
flags |= R_FLAG;
}
else
{
if (res == 0L)
flags |= Z_FLAG;
if (src & dst)
flags |= V_FLAG;
if (dst | src)
flags |= R_FLAG;
}
if ((res & 0x80000000L) != 0L) {
flags |= N_FLAG;
if (!src && !dst) {
flags |= V_FLAG;
} else if (src & dst) {
flags |= R_FLAG;
}
} else {
if (res == 0L) {
flags |= Z_FLAG;
}
if (src & dst) {
flags |= V_FLAG;
}
if (dst | src) {
flags |= R_FLAG;
}
}
return evaluate_flags_writeback(env, flags, ccs);
return evaluate_flags_writeback(env, flags, ccs);
}
uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs,
uint32_t src, uint32_t dst, uint32_t res)
{
uint32_t flags = 0;
uint32_t flags = 0;
src = src & 0x80000000;
dst = dst & 0x80000000;
src = src & 0x80000000;
dst = dst & 0x80000000;
if ((res & 0x80000000L) != 0L)
{
flags |= N_FLAG;
if (!src && !dst)
flags |= V_FLAG;
else if (src & dst)
flags |= C_FLAG;
}
else
{
if (res == 0L)
flags |= Z_FLAG;
if (src & dst)
flags |= V_FLAG;
if (dst | src)
flags |= C_FLAG;
}
if ((res & 0x80000000L) != 0L) {
flags |= N_FLAG;
if (!src && !dst) {
flags |= V_FLAG;
} else if (src & dst) {
flags |= C_FLAG;
}
} else {
if (res == 0L) {
flags |= Z_FLAG;
}
if (src & dst) {
flags |= V_FLAG;
}
if (dst | src) {
flags |= C_FLAG;
}
}
return evaluate_flags_writeback(env, flags, ccs);
return evaluate_flags_writeback(env, flags, ccs);
}
uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs,
uint32_t src, uint32_t dst, uint32_t res)
{
uint32_t flags = 0;
uint32_t flags = 0;
src = (~src) & 0x80000000;
dst = dst & 0x80000000;
src = (~src) & 0x80000000;
dst = dst & 0x80000000;
if ((res & 0x80000000L) != 0L)
{
flags |= N_FLAG;
if (!src && !dst)
flags |= V_FLAG;
else if (src & dst)
flags |= C_FLAG;
}
else
{
if (res == 0L)
flags |= Z_FLAG;
if (src & dst)
flags |= V_FLAG;
if (dst | src)
flags |= C_FLAG;
}
if ((res & 0x80000000L) != 0L) {
flags |= N_FLAG;
if (!src && !dst) {
flags |= V_FLAG;
} else if (src & dst) {
flags |= C_FLAG;
}
} else {
if (res == 0L) {
flags |= Z_FLAG;
}
if (src & dst) {
flags |= V_FLAG;
}
if (dst | src) {
flags |= C_FLAG;
}
}
flags ^= C_FLAG;
return evaluate_flags_writeback(env, flags, ccs);
flags ^= C_FLAG;
return evaluate_flags_writeback(env, flags, ccs);
}
uint32_t helper_evaluate_flags_move_4(CPUCRISState *env,
uint32_t ccs, uint32_t res)
{
uint32_t flags = 0;
uint32_t flags = 0;
if ((int32_t)res < 0)
flags |= N_FLAG;
else if (res == 0L)
flags |= Z_FLAG;
if ((int32_t)res < 0) {
flags |= N_FLAG;
} else if (res == 0L) {
flags |= Z_FLAG;
}
return evaluate_flags_writeback(env, flags, ccs);
return evaluate_flags_writeback(env, flags, ccs);
}
uint32_t helper_evaluate_flags_move_2(CPUCRISState *env,
uint32_t ccs, uint32_t res)
{
uint32_t flags = 0;
uint32_t flags = 0;
if ((int16_t)res < 0L)
flags |= N_FLAG;
else if (res == 0)
flags |= Z_FLAG;
if ((int16_t)res < 0L) {
flags |= N_FLAG;
} else if (res == 0) {
flags |= Z_FLAG;
}
return evaluate_flags_writeback(env, flags, ccs);
return evaluate_flags_writeback(env, flags, ccs);
}
/* TODO: This is expensive. We could split things up and only evaluate part of
CCR on a need to know basis. For now, we simply re-evaluate everything. */
/*
* TODO: This is expensive. We could split things up and only evaluate part of
* CCR on a need to know basis. For now, we simply re-evaluate everything.
*/
void helper_evaluate_flags(CPUCRISState *env)
{
uint32_t src, dst, res;
uint32_t flags = 0;
uint32_t src, dst, res;
uint32_t flags = 0;
src = env->cc_src;
dst = env->cc_dest;
res = env->cc_result;
src = env->cc_src;
dst = env->cc_dest;
res = env->cc_result;
if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP)
src = ~src;
if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) {
src = ~src;
}
/* Now, evaluate the flags. This stuff is based on
Per Zander's CRISv10 simulator. */
switch (env->cc_size)
{
case 1:
if ((res & 0x80L) != 0L)
{
flags |= N_FLAG;
if (((src & 0x80L) == 0L)
&& ((dst & 0x80L) == 0L))
{
flags |= V_FLAG;
}
else if (((src & 0x80L) != 0L)
&& ((dst & 0x80L) != 0L))
{
flags |= C_FLAG;
}
}
else
{
if ((res & 0xFFL) == 0L)
{
flags |= Z_FLAG;
}
if (((src & 0x80L) != 0L)
&& ((dst & 0x80L) != 0L))
{
flags |= V_FLAG;
}
if ((dst & 0x80L) != 0L
|| (src & 0x80L) != 0L)
{
flags |= C_FLAG;
}
}
break;
case 2:
if ((res & 0x8000L) != 0L)
{
flags |= N_FLAG;
if (((src & 0x8000L) == 0L)
&& ((dst & 0x8000L) == 0L))
{
flags |= V_FLAG;
}
else if (((src & 0x8000L) != 0L)
&& ((dst & 0x8000L) != 0L))
{
flags |= C_FLAG;
}
}
else
{
if ((res & 0xFFFFL) == 0L)
{
flags |= Z_FLAG;
}
if (((src & 0x8000L) != 0L)
&& ((dst & 0x8000L) != 0L))
{
flags |= V_FLAG;
}
if ((dst & 0x8000L) != 0L
|| (src & 0x8000L) != 0L)
{
flags |= C_FLAG;
}
}
break;
case 4:
if ((res & 0x80000000L) != 0L)
{
flags |= N_FLAG;
if (((src & 0x80000000L) == 0L)
&& ((dst & 0x80000000L) == 0L))
{
flags |= V_FLAG;
}
else if (((src & 0x80000000L) != 0L) &&
((dst & 0x80000000L) != 0L))
{
flags |= C_FLAG;
}
}
else
{
if (res == 0L)
flags |= Z_FLAG;
if (((src & 0x80000000L) != 0L)
&& ((dst & 0x80000000L) != 0L))
flags |= V_FLAG;
if ((dst & 0x80000000L) != 0L
|| (src & 0x80000000L) != 0L)
flags |= C_FLAG;
}
break;
default:
break;
}
/*
* Now, evaluate the flags. This stuff is based on
* Per Zander's CRISv10 simulator.
*/
switch (env->cc_size) {
case 1:
if ((res & 0x80L) != 0L) {
flags |= N_FLAG;
if (((src & 0x80L) == 0L) && ((dst & 0x80L) == 0L)) {
flags |= V_FLAG;
} else if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) {
flags |= C_FLAG;
}
} else {
if ((res & 0xFFL) == 0L) {
flags |= Z_FLAG;
}
if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) {
flags |= V_FLAG;
}
if ((dst & 0x80L) != 0L || (src & 0x80L) != 0L) {
flags |= C_FLAG;
}
}
break;
case 2:
if ((res & 0x8000L) != 0L) {
flags |= N_FLAG;
if (((src & 0x8000L) == 0L) && ((dst & 0x8000L) == 0L)) {
flags |= V_FLAG;
} else if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) {
flags |= C_FLAG;
}
} else {
if ((res & 0xFFFFL) == 0L) {
flags |= Z_FLAG;
}
if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) {
flags |= V_FLAG;
}
if ((dst & 0x8000L) != 0L || (src & 0x8000L) != 0L) {
flags |= C_FLAG;
}
}
break;
case 4:
if ((res & 0x80000000L) != 0L) {
flags |= N_FLAG;
if (((src & 0x80000000L) == 0L) && ((dst & 0x80000000L) == 0L)) {
flags |= V_FLAG;
} else if (((src & 0x80000000L) != 0L) &&
((dst & 0x80000000L) != 0L)) {
flags |= C_FLAG;
}
} else {
if (res == 0L) {
flags |= Z_FLAG;
}
if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) {
flags |= V_FLAG;
}
if ((dst & 0x80000000L) != 0L || (src & 0x80000000L) != 0L) {
flags |= C_FLAG;
}
}
break;
default:
break;
}
if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP)
flags ^= C_FLAG;
if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) {
flags ^= C_FLAG;
}
env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags,
env->pregs[PR_CCS]);
env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags,
env->pregs[PR_CCS]);
}
void helper_top_evaluate_flags(CPUCRISState *env)
{
switch (env->cc_op)
{
case CC_OP_MCP:
env->pregs[PR_CCS] = helper_evaluate_flags_mcp(env,
env->pregs[PR_CCS], env->cc_src,
env->cc_dest, env->cc_result);
break;
case CC_OP_MULS:
env->pregs[PR_CCS] = helper_evaluate_flags_muls(env,
env->pregs[PR_CCS], env->cc_result,
env->pregs[PR_MOF]);
break;
case CC_OP_MULU:
env->pregs[PR_CCS] = helper_evaluate_flags_mulu(env,
env->pregs[PR_CCS], env->cc_result,
env->pregs[PR_MOF]);
break;
case CC_OP_MOVE:
case CC_OP_AND:
case CC_OP_OR:
case CC_OP_XOR:
case CC_OP_ASR:
case CC_OP_LSR:
case CC_OP_LSL:
switch (env->cc_size)
{
case 4:
env->pregs[PR_CCS] =
helper_evaluate_flags_move_4(env,
env->pregs[PR_CCS],
env->cc_result);
break;
case 2:
env->pregs[PR_CCS] =
helper_evaluate_flags_move_2(env,
env->pregs[PR_CCS],
env->cc_result);
break;
default:
helper_evaluate_flags(env);
break;
}
break;
case CC_OP_FLAGS:
/* live. */
break;
case CC_OP_SUB:
case CC_OP_CMP:
if (env->cc_size == 4)
env->pregs[PR_CCS] =
helper_evaluate_flags_sub_4(env,
env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
else
helper_evaluate_flags(env);
break;
default:
{
switch (env->cc_size)
{
case 4:
env->pregs[PR_CCS] =
helper_evaluate_flags_alu_4(env,
env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
break;
default:
helper_evaluate_flags(env);
break;
}
}
break;
}
switch (env->cc_op) {
case CC_OP_MCP:
env->pregs[PR_CCS]
= helper_evaluate_flags_mcp(env, env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
break;
case CC_OP_MULS:
env->pregs[PR_CCS]
= helper_evaluate_flags_muls(env, env->pregs[PR_CCS],
env->cc_result, env->pregs[PR_MOF]);
break;
case CC_OP_MULU:
env->pregs[PR_CCS]
= helper_evaluate_flags_mulu(env, env->pregs[PR_CCS],
env->cc_result, env->pregs[PR_MOF]);
break;
case CC_OP_MOVE:
case CC_OP_AND:
case CC_OP_OR:
case CC_OP_XOR:
case CC_OP_ASR:
case CC_OP_LSR:
case CC_OP_LSL:
switch (env->cc_size) {
case 4:
env->pregs[PR_CCS] =
helper_evaluate_flags_move_4(env,
env->pregs[PR_CCS],
env->cc_result);
break;
case 2:
env->pregs[PR_CCS] =
helper_evaluate_flags_move_2(env,
env->pregs[PR_CCS],
env->cc_result);
break;
default:
helper_evaluate_flags(env);
break;
}
break;
case CC_OP_FLAGS:
/* live. */
break;
case CC_OP_SUB:
case CC_OP_CMP:
if (env->cc_size == 4) {
env->pregs[PR_CCS] =
helper_evaluate_flags_sub_4(env,
env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
} else {
helper_evaluate_flags(env);
}
break;
default:
switch (env->cc_size) {
case 4:
env->pregs[PR_CCS] =
helper_evaluate_flags_alu_4(env,
env->pregs[PR_CCS],
env->cc_src, env->cc_dest,
env->cc_result);
break;
default:
helper_evaluate_flags(env);
break;
}
break;
}
}

View File

@ -3097,7 +3097,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
* delayslot, like in real hw.
*/
pc_start = tb->pc & ~1;
dc->cpu = cris_env_get_cpu(env);
dc->cpu = env_archcpu(env);
dc->tb = tb;
dc->is_jmp = DISAS_NEXT;

target/hppa/cpu-param.h (new file, 34 lines)
View File

@ -0,0 +1,34 @@
/*
* PA-RISC cpu parameters for qemu.
*
* Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
* SPDX-License-Identifier: LGPL-2.0+
*/
#ifndef HPPA_CPU_PARAM_H
#define HPPA_CPU_PARAM_H 1
#ifdef TARGET_HPPA64
# define TARGET_LONG_BITS 64
# define TARGET_REGISTER_BITS 64
# define TARGET_VIRT_ADDR_SPACE_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 64
#elif defined(CONFIG_USER_ONLY)
# define TARGET_LONG_BITS 32
# define TARGET_REGISTER_BITS 32
# define TARGET_VIRT_ADDR_SPACE_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 32
#else
/*
* In order to form the GVA from space:offset,
* we need a 64-bit virtual address space.
*/
# define TARGET_LONG_BITS 64
# define TARGET_REGISTER_BITS 32
# define TARGET_VIRT_ADDR_SPACE_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
#define NB_MMU_MODES 5
#endif

View File

@ -134,7 +134,7 @@ static void hppa_cpu_initfn(Object *obj)
HPPACPU *cpu = HPPA_CPU(obj);
CPUHPPAState *env = &cpu->env;
cs->env_ptr = env;
cpu_set_cpustate_pointers(cpu);
cs->exception_index = -1;
cpu_hppa_loaded_fr0(env);
cpu_hppa_put_psw(env, PSW_W);

View File

@ -22,25 +22,8 @@
#include "qemu-common.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#ifdef TARGET_HPPA64
#define TARGET_LONG_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
#define TARGET_REGISTER_BITS 64
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#elif defined(CONFIG_USER_ONLY)
#define TARGET_LONG_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#define TARGET_REGISTER_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#else
/* In order to form the GVA from space:offset,
we need a 64-bit virtual address space. */
#define TARGET_LONG_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
#define TARGET_REGISTER_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
/* PA-RISC 1.x processors have a strong memory model. */
/* ??? While we do not yet implement PA-RISC 2.0, those processors have
@ -48,14 +31,7 @@
basis. It's probably easier to fall back to a strong memory model. */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
#define CPUArchState struct CPUHPPAState
#include "exec/cpu-defs.h"
#define TARGET_PAGE_BITS 12
#define ALIGNED_ONLY
#define NB_MMU_MODES 5
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 3
#define MMU_PHYS_IDX 4
@ -221,9 +197,6 @@ struct CPUHPPAState {
target_ureg cr_back[2]; /* back of cr17/cr18 */
target_ureg shadow[7]; /* shadow registers */
/* Those resources are used only in QEMU core */
CPU_COMMON
/* ??? The number of entries isn't specified by the architecture. */
/* ??? Implement a unified itlb/dtlb for the moment. */
/* ??? We should use a more intelligent data structure. */
@ -242,17 +215,14 @@ struct HPPACPU {
CPUState parent_obj;
/*< public >*/
CPUNegativeOffsetState neg;
CPUHPPAState env;
QEMUTimer *alarm_timer;
};
static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env)
{
return container_of(env, HPPACPU, env);
}
#define ENV_GET_CPU(e) CPU(hppa_env_get_cpu(e))
#define ENV_OFFSET offsetof(HPPACPU, env)
typedef CPUHPPAState CPUArchState;
typedef HPPACPU ArchCPU;
#include "exec/cpu-all.h"

View File

@ -71,8 +71,7 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
/* If PSW_P changes, it affects how we translate addresses. */
if ((psw ^ old_psw) & PSW_P) {
#ifndef CONFIG_USER_ONLY
CPUState *src = CPU(hppa_env_get_cpu(env));
tlb_flush_by_mmuidx(src, 0xf);
tlb_flush_by_mmuidx(env_cpu(env), 0xf);
#endif
}
}

View File

@ -77,7 +77,7 @@ void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
{
env->cr[CR_EIRR] &= ~val;
qemu_mutex_lock_iothread();
eval_interrupt(hppa_env_get_cpu(env));
eval_interrupt(env_archcpu(env));
qemu_mutex_unlock_iothread();
}
@ -85,7 +85,7 @@ void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
{
env->cr[CR_EIEM] = val;
qemu_mutex_lock_iothread();
eval_interrupt(hppa_env_get_cpu(env));
eval_interrupt(env_archcpu(env));
qemu_mutex_unlock_iothread();
}
#endif /* !CONFIG_USER_ONLY */

View File

@ -56,7 +56,7 @@ static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
CPUState *cs = CPU(hppa_env_get_cpu(env));
CPUState *cs = env_cpu(env);
unsigned i, n = 1 << (2 * ent->page_size);
uint64_t addr = ent->va_b;
@ -329,7 +329,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
CPUState *src = CPU(hppa_env_get_cpu(env));
CPUState *src = env_cpu(env);
CPUState *cpu;
trace_hppa_tlb_ptlb(env);
run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
@ -346,17 +346,15 @@ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
CPUState *src = CPU(hppa_env_get_cpu(env));
trace_hppa_tlb_ptlbe(env);
memset(env->tlb, 0, sizeof(env->tlb));
tlb_flush_by_mmuidx(src, 0xf);
tlb_flush_by_mmuidx(env_cpu(env), 0xf);
}
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
if (env->psw & PSW_P) {
CPUState *src = CPU(hppa_env_get_cpu(env));
tlb_flush_by_mmuidx(src, 0xf);
tlb_flush_by_mmuidx(env_cpu(env), 0xf);
}
}
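Both flushes above pass the literal mask 0xf to tlb_flush_by_mmuidx(). With NB_MMU_MODES at 5 and MMU_PHYS_IDX at 4, that clears the four virtual translation regimes (MMU indexes 0-3) while leaving the physical index, whose 1:1 mappings never change, intact. A sketch naming the intent (the macro is hypothetical, not in this patch):

/* Hypothetical mask: indexes 0-3 are virtual translation regimes;
 * index 4 is MMU_PHYS_IDX and never needs flushing. */
#define HPPA_VIRT_MMU_MASK 0xf

static void hppa_flush_virtual_tlbs(CPUState *cs)
{
    tlb_flush_by_mmuidx(cs, HPPA_VIRT_MMU_MASK);
}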

View File

@ -29,8 +29,7 @@
void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp)
{
HPPACPU *cpu = hppa_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
cs->exception_index = excp;
cpu_loop_exit(cs);
@ -38,8 +37,7 @@ void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp)
void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
HPPACPU *cpu = hppa_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUState *cs = env_cpu(env);
cs->exception_index = excp;
cpu_loop_exit_restore(cs, ra);
@ -77,7 +75,7 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
}
#else
/* FIXME -- we can do better. */
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
cpu_loop_exit_atomic(env_cpu(env), ra);
#endif
}
@ -630,7 +628,7 @@ target_ureg HELPER(read_interval_timer)(void)
#ifndef CONFIG_USER_ONLY
void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val)
{
HPPACPU *cpu = hppa_env_get_cpu(env);
HPPACPU *cpu = env_archcpu(env);
uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
uint64_t timeout;

View File

@ -53,7 +53,7 @@ static inline int hw_breakpoint_len(unsigned long dr7, int index)
static int hw_breakpoint_insert(CPUX86State *env, int index)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
target_ulong dr7 = env->dr[7];
target_ulong drN = env->dr[index];
int err = 0;
@ -97,7 +97,7 @@ static int hw_breakpoint_insert(CPUX86State *env, int index)
static void hw_breakpoint_remove(CPUX86State *env, int index)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST:

target/i386/cpu-param.h (new file, 28 lines)
View File

@ -0,0 +1,28 @@
/*
* i386 cpu parameters for qemu.
*
* Copyright (c) 2003 Fabrice Bellard
* SPDX-License-Identifier: LGPL-2.0+
*/
#ifndef I386_CPU_PARAM_H
#define I386_CPU_PARAM_H 1
#ifdef TARGET_X86_64
# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 52
/*
* ??? This is really 48 bits, sign-extended, but the only thing
* accessible to userland with bit 48 set is the VSYSCALL, and that
* is handled via other mechanisms.
*/
# define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
#define NB_MMU_MODES 3
#endif

View File

@ -4222,8 +4222,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
X86CPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
uint32_t pkg_offset;
uint32_t limit;
uint32_t signature[3];
@ -5592,13 +5592,12 @@ static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
static void x86_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
X86CPU *cpu = X86_CPU(obj);
X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
CPUX86State *env = &cpu->env;
FeatureWord w;
cs->env_ptr = env;
cpu_set_cpustate_pointers(cpu);
object_property_add(obj, "family", "int",
x86_cpuid_version_get_family,

View File

@ -1,4 +1,3 @@
/*
* i386 virtual CPU header
*
@ -24,13 +23,6 @@
#include "qemu-common.h"
#include "cpu-qom.h"
#include "hyperv-proto.h"
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif
#include "exec/cpu-defs.h"
/* The x86 has a strong memory model with some store-after-load re-ordering */
@ -51,8 +43,6 @@
#define ELF_MACHINE_UNAME "i686"
#endif
#define CPUArchState struct CPUX86State
enum {
R_EAX = 0,
R_ECX = 1,
@ -956,7 +946,6 @@ typedef struct {
#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1
#define NB_OPMASK_REGS 8
@ -1300,9 +1289,7 @@ typedef struct CPUX86State {
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields after CPU_COMMON are preserved across CPU reset. */
/* Fields after this point are preserved across CPU reset. */
/* processor features (e.g. for CPUID insn) */
/* Minimum level/xlevel/xlevel2, based on CPU model + features */
@ -1380,6 +1367,7 @@ struct X86CPU {
CPUState parent_obj;
/*< public >*/
CPUNegativeOffsetState neg;
CPUX86State env;
bool hyperv_vapic;
@ -1491,14 +1479,6 @@ struct X86CPU {
int32_t hv_max_vps;
};
static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
{
return container_of(env, X86CPU, env);
}
#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))
#define ENV_OFFSET offsetof(X86CPU, env)
#ifndef CONFIG_USER_ONLY
extern struct VMStateDescription vmstate_x86_cpu;
@ -1695,19 +1675,6 @@ void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
/* hw/pc.c */
uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_PAGE_BITS 12
#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* ??? This is really 48 bits, sign-extended, but the only thing
accessible to userland with bit 48 set is the VSYSCALL, and that
is handled via other mechanisms. */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
/* XXX: This value should match the one returned by CPUID
* and in exec.c */
# if defined(TARGET_X86_64)
@ -1776,6 +1743,9 @@ static inline target_long lshift(target_long x, int n)
/* translate.c */
void tcg_x86_init(void);
typedef CPUX86State CPUArchState;
typedef X86CPU ArchCPU;
#include "exec/cpu-all.h"
#include "svm.h"

View File

@ -90,7 +90,7 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
int next_eip_addend,
uintptr_t retaddr)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
if (!is_int) {
cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,

View File

@ -1477,7 +1477,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
env->pkru = 0;
}
if (env->pkru != old_pkru) {
CPUState *cs = CPU(x86_env_get_cpu(env));
CPUState *cs = env_cpu(env);
tlb_flush(cs);
}
}

View File

@ -67,7 +67,7 @@ int valid_hax_tunnel_size(uint16_t size)
hax_fd hax_vcpu_get_fd(CPUArchState *env)
{
struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;
struct hax_vcpu_state *vcpu = env_cpu(env)->hax_vcpu;
if (!vcpu) {
return HAX_INVALID_FD;
}
@ -409,7 +409,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
static int hax_vcpu_interrupt(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
struct hax_tunnel *ht = vcpu->tunnel;
@ -461,7 +461,7 @@ void hax_raise_event(CPUState *cpu)
static int hax_vcpu_hax_exec(CPUArchState *env)
{
int ret = 0;
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = env_cpu(env);
X86CPU *x86_cpu = X86_CPU(cpu);
struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
struct hax_tunnel *ht = vcpu->tunnel;

Some files were not shown because too many files have changed in this diff.