Patch queue for s390 - 2015-06-17
This is a special one. Two awesome features in one pull request:

- CCW support for TCG
- Watchpoint support for TCG

To celebrate this, we also switch the default machine model from s390-virtio
to s390-ccw and give users a fully working s390x model again!

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.9 (GNU/Linux)

iQIcBAABAgAGBQJVgU8SAAoJECszeR4D/txgtzEP/R48c1FBYVPf2hg1DnGtDV7N
YEsymqymRXXi2Esdnfj+5Hs97BLKis0Fr0AX15tp5uhK15lntgLLqfZeGjguX3pD
5dSn9uJeRpzkf05eCnJo7/nV5w5iWgJhKNt2lyt7+mQWUdrdYQ6XzfLbBUhHkFJp
ev4SU7LilcL4sNAM8pQKQBLOT3djdy2KAwE4PdeqZxSngkRcePWDMBt4axyGX9t5
esKpWS3tQ9b9AgTkGB/XHtPIimQDpbsvi/MGI6A57Xd13hLn4NDKjjfFHhx/RMyW
UVeJl9+ndlp1DdsIRKaUOWGBaWKZB/sxCk6rt1W46WyQ8JoHWni2b2pfjn2k5NTk
xmGpUk91GlV56OUd5K+9W6wwdkOjpf8Ps79+s6z+w5yz5NJ2L+vLwUZjs13UDMYs
UZbHDCOI9wCZ2G8jwRBOkEyaXgRMAbBLefeBfr+Zlbnx/U/rBhioUFtmody9F0er
MfQjSYeDoC3I4v0KdS15li+ndpyCE5CwYUMBEajsoC5A5l6N/zK+y9ZY1NfPKKJi
kkvtbHaFtlz/UluOSgA8EN62AAu/nFAfJcZkG20qfumg0oIIs3kX/BsDvwKSoeSU
D6cpaw+q65/a2M5wsPnXJojXP6zZyC9Oxnv41l6c8vFqWt6vjfcESCZ6/QzFv16T
jCzNsC4T+tOSf+nHqyns
=PPD5
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/agraf/tags/signed-s390-for-upstream' into staging

Patch queue for s390 - 2015-06-17

This is a special one. Two awesome features in one pull request:

- CCW support for TCG
- Watchpoint support for TCG

To celebrate this, we also switch the default machine model from s390-virtio
to s390-ccw and give users a fully working s390x model again!

# gpg: Signature made Wed Jun 17 11:42:26 2015 BST using RSA key ID 03FEDC60
# gpg: Good signature from "Alexander Graf <agraf@suse.de>"
# gpg:                 aka "Alexander Graf <alex@csgraf.de>"

* remotes/agraf/tags/signed-s390-for-upstream: (26 commits)
  s390x: Switch to s390-ccw machine as default
  target-s390x: PER: add Breaking-Event-Address register
  target-s390x: PER instruction-fetch nullification event support
  target-s390x: PER store-using-real-address event support
  target-s390x: PER storage-alteration event support
  translate-all: fix watchpoints if retranslation not possible
  target-s390x: PER instruction-fetch event support
  target-s390x: PER successful-branching event support
  target-s390x: basic PER event handling
  target-s390x: add get_per_in_range function
  target-s390x: add get_per_atmid function
  target-s390x: add PER related constants
  target-s390x: mvc_fast_memmove: access memory through softmmu
  target-s390x: mvc_fast_memset: access memory through softmmu
  target-s390x: function to adjust the length wrt page boundary
  softmmu: provide tlb_vaddr_to_host function for user mode
  target-s390x: wire up I/O instructions in TCG mode
  target-s390x: wire up DIAG REIPL in TCG mode
  target-s390x: wire up DIAG IPL in TCG mode
  target-s390x: fix s390_cpu_initial_reset
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f754c3c9cc
@@ -942,7 +942,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
    devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
                               &error_abort);
    if (arch_type == QEMU_ARCH_S390X) {
        qemu_opt_set(devopts, "driver", "virtio-blk-s390", &error_abort);
        qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort);
    } else {
        qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort);
    }

@@ -216,6 +216,7 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
    mc->no_sdcard = 1;
    mc->use_sclp = 1;
    mc->max_cpus = 255;
    mc->is_default = 1;
    nc->nmi_monitor_handler = s390_nmi;
}

@@ -345,7 +345,6 @@ static void s390_machine_class_init(ObjectClass *oc, void *data)
    mc->no_floppy = 1;
    mc->no_cdrom = 1;
    mc->no_sdcard = 1;
    mc->is_default = 1;
    nc->nmi_monitor_handler = s390_nmi;
}

@@ -1401,6 +1401,10 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
        return;
    }

    if (!kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,

@@ -399,6 +399,8 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#undef MEMSUFFIX
#undef SOFTMMU_CODE_ACCESS

#endif /* defined(CONFIG_USER_ONLY) */

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState

@@ -417,6 +419,9 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
                                      int access_type, int mmu_idx)
{
#if defined(CONFIG_USER_ONLY)
    return g2h(vaddr);
#else
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr;

@@ -449,8 +454,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    return (void *)haddr;
#endif /* defined(CONFIG_USER_ONLY) */
}

#endif /* defined(CONFIG_USER_ONLY) */

#endif /* CPU_LDST_H */
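The tlb_vaddr_to_host() fast path starts from a direct-mapped softmmu TLB lookup: the page offset is dropped and the remaining low bits select one TLB slot. A small standalone sketch of that index computation; the page size and TLB size here are illustrative stand-ins, not QEMU's per-target constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; QEMU's real constants are per-target. */
    #define DEMO_PAGE_BITS 12            /* 4 KiB pages */
    #define DEMO_TLB_SIZE  256           /* direct-mapped TLB with 256 slots */

    /* Same index computation as tlb_vaddr_to_host(): drop the page offset,
     * then keep just enough bits to address one TLB slot. */
    static unsigned tlb_index(uint64_t vaddr)
    {
        return (vaddr >> DEMO_PAGE_BITS) & (DEMO_TLB_SIZE - 1);
    }

    int main(void)
    {
        printf("%u\n", tlb_index(0x0000000000401234ULL)); /* page 0x401 -> slot 1 */
        printf("%u\n", tlb_index(0x0000000000501234ULL)); /* page 0x501 -> slot 1 too (alias) */
        return 0;
    }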
@@ -42,9 +42,9 @@ static const QDevAlias qdev_alias_table[] = {
    { "virtio-serial-pci", "virtio-serial", QEMU_ARCH_ALL & ~QEMU_ARCH_S390X },
    { "virtio-balloon-pci", "virtio-balloon",
      QEMU_ARCH_ALL & ~QEMU_ARCH_S390X },
    { "virtio-blk-s390", "virtio-blk", QEMU_ARCH_S390X },
    { "virtio-net-s390", "virtio-net", QEMU_ARCH_S390X },
    { "virtio-serial-s390", "virtio-serial", QEMU_ARCH_S390X },
    { "virtio-blk-ccw", "virtio-blk", QEMU_ARCH_S390X },
    { "virtio-net-ccw", "virtio-net", QEMU_ARCH_S390X },
    { "virtio-serial-ccw", "virtio-serial", QEMU_ARCH_S390X },
    { "lsi53c895a", "lsi" },
    { "ich9-ahci", "ahci" },
    { "kvm-pci-assign", "pci-assign" },

@@ -98,5 +98,6 @@ hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void s390_cpu_gdb_init(CPUState *cs);
void s390x_cpu_debug_excp_handler(CPUState *cs);

#endif

@@ -106,6 +106,7 @@ static void s390_cpu_initial_reset(CPUState *s)
{
    S390CPU *cpu = S390_CPU(s);
    CPUS390XState *env = &cpu->env;
    int i;

    s390_cpu_reset(s);
    /* initial reset does not touch regs,fregs and aregs */

@@ -116,7 +117,14 @@ static void s390_cpu_initial_reset(CPUState *s)
    env->cregs[0] = CR0_RESET;
    env->cregs[14] = CR14_RESET;

    /* architectured initial value for Breaking-Event-Address register */
    env->gbea = 1;

    env->pfault_token = -1UL;
    env->ext_index = -1;
    for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
        env->io_index[i] = -1;
    }

    /* tininess for underflow is detected before rounding */
    set_float_detect_tininess(float_tininess_before_rounding,

@@ -126,6 +134,7 @@ static void s390_cpu_initial_reset(CPUState *s)
    if (kvm_enabled()) {
        kvm_s390_reset_vcpu(cpu);
    }
    tlb_flush(s, 1);
}

/* CPUClass:reset() */

@@ -134,6 +143,7 @@ static void s390_cpu_full_reset(CPUState *s)
    S390CPU *cpu = S390_CPU(s);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    CPUS390XState *env = &cpu->env;
    int i;

    scc->parent_reset(s);
    cpu->env.sigp_order = 0;

@@ -145,7 +155,14 @@ static void s390_cpu_full_reset(CPUState *s)
    env->cregs[0] = CR0_RESET;
    env->cregs[14] = CR14_RESET;

    /* architectured initial value for Breaking-Event-Address register */
    env->gbea = 1;

    env->pfault_token = -1UL;
    env->ext_index = -1;
    for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
        env->io_index[i] = -1;
    }

    /* tininess for underflow is detected before rounding */
    set_float_detect_tininess(float_tininess_before_rounding,

@@ -207,7 +224,6 @@ static void s390_cpu_initfn(Object *obj)
    s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
#endif
    env->cpu_num = cpu_num++;
    env->ext_index = -1;

    if (tcg_enabled() && !inited) {
        inited = true;

@@ -333,6 +349,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
    cc->write_elf64_note = s390_cpu_write_elf64_note;
    cc->write_elf64_qemunote = s390_cpu_write_elf64_qemunote;
    cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
    cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
#endif
    cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
    cc->gdb_core_xml_file = "s390x-core64.xml";

@@ -111,6 +111,9 @@ typedef struct CPUS390XState {
    uint32_t int_svc_code;
    uint32_t int_svc_ilen;

    uint64_t per_address;
    uint16_t per_perc_atmid;

    uint64_t cregs[16]; /* control registers */

    ExtQueue ext_queue[MAX_EXT_QUEUE];

@@ -364,6 +367,45 @@ static inline int get_ilen(uint8_t opc)
    }
}

/* PER bits from control register 9 */
#define PER_CR9_EVENT_BRANCH 0x80000000
#define PER_CR9_EVENT_IFETCH 0x40000000
#define PER_CR9_EVENT_STORE 0x20000000
#define PER_CR9_EVENT_STORE_REAL 0x08000000
#define PER_CR9_EVENT_NULLIFICATION 0x01000000
#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
#define PER_CR9_CONTROL_ALTERATION 0x00200000

/* PER bits from the PER CODE/ATMID/AI in lowcore */
#define PER_CODE_EVENT_BRANCH 0x8000
#define PER_CODE_EVENT_IFETCH 0x4000
#define PER_CODE_EVENT_STORE 0x2000
#define PER_CODE_EVENT_STORE_REAL 0x0800
#define PER_CODE_EVENT_NULLIFICATION 0x0100

/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
   entry when a PER exception is triggered. */
static inline uint8_t get_per_atmid(CPUS390XState *env)
{
    return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
           ( (1 << 6) ) |
           ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
           ((env->psw.mask & PSW_MASK_DAT)? (1 << 4) : 0) |
           ((env->psw.mask & PSW_ASC_SECONDARY)? (1 << 3) : 0) |
           ((env->psw.mask & PSW_ASC_ACCREG)? (1 << 2) : 0);
}

/* Check if an address is within the PER starting address and the PER
   ending address. The address range might loop. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

#ifndef CONFIG_USER_ONLY
/* In several cases of runtime exceptions, we havn't recorded the true
   instruction length. Use these codes when raising exceptions in order
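The range test in get_per_in_range() handles a PER range that wraps past the top of the address space (CR10 greater than CR11). A standalone demonstration with concrete values, not the QEMU inline itself:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same test as get_per_in_range(): CR10 holds the PER starting address,
     * CR11 the ending address; when start > end the range wraps around the
     * top of the address space. */
    static bool per_in_range(uint64_t start, uint64_t end, uint64_t addr)
    {
        if (start <= end) {
            return start <= addr && addr <= end;
        } else {
            return start <= addr || addr <= end;
        }
    }

    int main(void)
    {
        /* Ordinary range. */
        printf("%d\n", per_in_range(0x1000, 0x2000, 0x1800));            /* 1 */
        /* Wrapping range: covers [0xffff0000, MAX] and [0, 0x0fff]. */
        printf("%d\n", per_in_range(0xffff0000ULL, 0x0fffULL, 0x0800));  /* 1 */
        printf("%d\n", per_in_range(0xffff0000ULL, 0x0fffULL, 0x8000));  /* 0 */
        return 0;
    }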
@@ -709,6 +751,7 @@ static inline void setcc(S390CPU *cpu, uint64_t cc)

    env->psw.mask &= ~(3ull << 44);
    env->psw.mask |= (cc & 3) << 44;
    env->cc_op = cc;
}

typedef struct LowCore

@@ -746,14 +789,16 @@ typedef struct LowCore
    uint8_t pad5[0xf4-0xf0]; /* 0x0f0 */
    uint32_t external_damage_code; /* 0x0f4 */
    uint64_t failing_storage_address; /* 0x0f8 */
    uint8_t pad6[0x120-0x100]; /* 0x100 */
    uint8_t pad6[0x110-0x100]; /* 0x100 */
    uint64_t per_breaking_event_addr; /* 0x110 */
    uint8_t pad7[0x120-0x118]; /* 0x118 */
    PSW restart_old_psw; /* 0x120 */
    PSW external_old_psw; /* 0x130 */
    PSW svc_old_psw; /* 0x140 */
    PSW program_old_psw; /* 0x150 */
    PSW mcck_old_psw; /* 0x160 */
    PSW io_old_psw; /* 0x170 */
    uint8_t pad7[0x1a0-0x180]; /* 0x180 */
    uint8_t pad8[0x1a0-0x180]; /* 0x180 */
    PSW restart_new_psw; /* 0x1a0 */
    PSW external_new_psw; /* 0x1b0 */
    PSW svc_new_psw; /* 0x1c0 */

@@ -771,10 +816,10 @@ typedef struct LowCore
    uint64_t last_update_clock; /* 0x280 */
    uint64_t steal_clock; /* 0x288 */
    PSW return_mcck_psw; /* 0x290 */
    uint8_t pad8[0xc00-0x2a0]; /* 0x2a0 */
    uint8_t pad9[0xc00-0x2a0]; /* 0x2a0 */
    /* System info area */
    uint64_t save_area[16]; /* 0xc00 */
    uint8_t pad9[0xd40-0xc80]; /* 0xc80 */
    uint8_t pad10[0xd40-0xc80]; /* 0xc80 */
    uint64_t kernel_stack; /* 0xd40 */
    uint64_t thread_info; /* 0xd48 */
    uint64_t async_stack; /* 0xd50 */

@@ -782,7 +827,7 @@ typedef struct LowCore
    uint64_t user_asce; /* 0xd60 */
    uint64_t panic_stack; /* 0xd68 */
    uint64_t user_exec_asce; /* 0xd70 */
    uint8_t pad10[0xdc0-0xd78]; /* 0xd78 */
    uint8_t pad11[0xdc0-0xd78]; /* 0xd78 */

    /* SMP info area: defined by DJB */
    uint64_t clock_comparator; /* 0xdc0 */

@@ -1002,6 +1047,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr);
void s390_cpu_recompute_watchpoints(CPUState *cs);

int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write);

@@ -1215,11 +1261,7 @@ static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                              uint32_t sch_id, int vq,
                                              bool assign)
{
    if (kvm_enabled()) {
        return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
    } else {
        return -ENOSYS;
    }
    return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
}

#ifdef CONFIG_KVM

@@ -181,12 +181,18 @@ hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)

void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {

@@ -250,25 +256,6 @@ void do_restart_interrupt(CPUS390XState *env)
    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;

@@ -292,12 +279,21 @@ static void do_program_interrupt(CPUS390XState *env)

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

@@ -308,6 +304,33 @@ static void do_program_interrupt(CPUS390XState *env)
    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)

@@ -557,4 +580,73 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all. */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled. */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchoint spanning the whole memory range, so
           split it in two parts. */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints. */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);

    } else {
        /* Default case, create a single watchpoint. */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently no way to detect the address space used
           to trigger the watchpoint. For now just consider it is the
           current default ASC. This turn to be true except when MVCP
           and MVCS instrutions are not used. */
        env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code. A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */
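s390_cpu_recompute_watchpoints() has to translate the CR10..CR11 range into (address, length) watchpoints, and neither the full 64-bit address space nor a wrapping range fits into one length value. A standalone sketch of how the range decomposes; the printout and names are illustrative only, not the QEMU API:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Turn a PER start/end pair into the watchpoint pieces that would be
     * registered: the whole address space needs two 2^63 halves, a wrapping
     * range needs [start..max] plus [0..end], everything else is one piece. */
    static void print_watchpoints(uint64_t start, uint64_t end)
    {
        if (start == 0 && end == UINT64_MAX) {
            printf("wp 0x0 len 2^63, wp 2^63 len 2^63\n");
        } else if (start > end) {
            printf("wp 0x%" PRIx64 " len 0x%" PRIx64 "\n", start, -start);
            printf("wp 0x0 len 0x%" PRIx64 "\n", end + 1);
        } else {
            printf("wp 0x%" PRIx64 " len 0x%" PRIx64 "\n", start, end - start + 1);
        }
    }

    int main(void)
    {
        print_watchpoints(0, UINT64_MAX);        /* whole address space */
        print_watchpoints(0xffffe000, 0x1fff);   /* wraps around zero */
        print_watchpoints(0x1000, 0x1fff);       /* ordinary 4 KiB range */
        return 0;
    }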
@@ -87,7 +87,7 @@ DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)

#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
DEF_HELPER_4(diag, i64, env, i32, i64, i64)
DEF_HELPER_4(diag, void, env, i32, i32, i32)
DEF_HELPER_3(load_psw, noreturn, env, i64, i64)
DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env)

@@ -116,4 +116,18 @@ DEF_HELPER_FLAGS_2(lura, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(lurag, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_3(stura, TCG_CALL_NO_WG, void, env, i64, i64)
DEF_HELPER_FLAGS_3(sturg, TCG_CALL_NO_WG, void, env, i64, i64)
DEF_HELPER_1(per_check_exception, void, env)
DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64)
DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64)

DEF_HELPER_2(xsch, void, env, i64)
DEF_HELPER_2(csch, void, env, i64)
DEF_HELPER_2(hsch, void, env, i64)
DEF_HELPER_3(msch, void, env, i64, i64)
DEF_HELPER_2(rchp, void, env, i64)
DEF_HELPER_2(rsch, void, env, i64)
DEF_HELPER_3(ssch, void, env, i64, i64)
DEF_HELPER_3(stsch, void, env, i64, i64)
DEF_HELPER_3(tsch, void, env, i64, i64)
DEF_HELPER_2(chsc, void, env, i64)
#endif

@@ -835,7 +835,7 @@
/* COMPARE AND SWAP AND PURGE */
    C(0xb250, CSP, RRE, Z, 0, ra2, 0, 0, csp, 0)
/* DIAGNOSE (KVM hypercall) */
    C(0x8300, DIAG, RX_a, Z, 0, 0, 0, 0, diag, 0)
    C(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0)
/* INSERT STORAGE KEY EXTENDED */
    C(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0)
/* INVALIDATE PAGE TABLE ENTRY */

@@ -915,17 +915,17 @@
/* TEST PROTECTION */
    C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0)

/* I/O Instructions. For each we simply indicate non-operation. */
    C(0xb276, XSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb230, CSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb231, HSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb232, MSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb23b, RCHP, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb238, RSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb233, SSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb234, STSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb235, TSCH, S, Z, 0, 0, 0, 0, subchannel, 0)
/* CCW I/O Instructions */
    C(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0)
    C(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0)
    C(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0)
    C(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0)
    C(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0)
    C(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0)
    C(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0)
    C(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0)
    C(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0)
/* ??? Not listed in PoO ninth edition, but there's a linux driver that
   uses it: "A CHSC subchannel is usually present on LPAR only." */
    C(0xb25f, CHSC, S, Z, 0, 0, 0, 0, subchannel, 0)
    C(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0)
#endif /* CONFIG_USER_ONLY */

@@ -129,12 +129,12 @@ void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)

static int ioinst_schib_valid(SCHIB *schib)
{
    if ((schib->pmcw.flags & PMCW_FLAGS_MASK_INVALID) ||
        (schib->pmcw.chars & PMCW_CHARS_MASK_INVALID)) {
    if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) ||
        (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) {
        return 0;
    }
    /* Disallow extended measurements for now. */
    if (schib->pmcw.chars & PMCW_CHARS_MASK_XMWME) {
    if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) {
        return 0;
    }
    return 1;
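The SCHIB fix above matters because pmcw.flags and pmcw.chars sit in guest memory in big-endian byte order, so on a little-endian host the mask test only works after be16_to_cpu()/be32_to_cpu(). A minimal demonstration of the failure mode; the swap helper is a stand-in for QEMU's:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-swap helper standing in for be16_to_cpu() on a little-endian host. */
    static uint16_t demo_be16_to_cpu(uint16_t v)
    {
        return (uint16_t)((v >> 8) | (v << 8));
    }

    int main(void)
    {
        /* A flag in bit 0x8000 of a big-endian halfword, as it appears when the
         * raw guest bytes are read into a little-endian uint16_t: the bytes
         * arrive swapped, so the raw value is 0x0080. */
        uint16_t raw_le_view = 0x0080;
        uint16_t mask = 0x8000;

        printf("raw test:     %s\n", (raw_le_view & mask) ? "set" : "missed");
        printf("swapped test: %s\n",
               (demo_be16_to_cpu(raw_le_view) & mask) ? "set" : "missed");
        return 0;
    }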
@@ -220,7 +220,7 @@ typedef struct IOIntCode {
#define IOINST_SCHID_SSID(_schid) ((_schid & 0x00060000) >> 17)
#define IOINST_SCHID_NR(_schid) (_schid & 0x0000ffff)

#define IO_INT_WORD_ISC(_int_word) ((_int_word & 0x38000000) >> 24)
#define IO_INT_WORD_ISC(_int_word) ((_int_word & 0x38000000) >> 27)
#define ISC_TO_ISC_BITS(_isc) ((0x80 >> _isc) << 24)

#define IO_INT_WORD_AI 0x80000000
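The interruption-subclass field occupies bits 0x38000000 of the interruption word, i.e. three bits whose lowest is bit 27, so the old shift by 24 returned the subclass multiplied by 8. A two-line check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t int_word = 0x38000000;  /* all three ISC bits set: ISC == 7 */

        printf("old macro (>> 24): %u\n", (int_word & 0x38000000) >> 24); /* 56 */
        printf("new macro (>> 27): %u\n", (int_word & 0x38000000) >> 27); /* 7 */
        return 0;
    }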
@@ -54,63 +54,67 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
#define HELPER_LOG(x...)
#endif

/* Reduce the length so that addr + len doesn't cross a page boundary. */
static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags, true)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(CPU(cpu), "should never reach here");
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -addr & ~TARGET_PAGE_MASK;
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
}

static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    hwaddr dest_phys;
    hwaddr src_phys;
    hwaddr len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags, true)) {
        cpu_stb_data(env, dest, 0);
        cpu_abort(CPU(cpu), "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags, true)) {
        cpu_ldub_data(env, src);
        cpu_abort(CPU(cpu), "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
    cpu_physical_memory_unmap(src_p, 0, len, len);
}
#endif
    return len;
}

static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted. */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration. */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}

static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted. */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration. */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}

/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
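adj_len_to_page() clamps a length so that a single memset()/memmove() never crosses a guest page boundary, which is what lets fast_memset()/fast_memmove() above work one TLB-resident page at a time. A standalone version of the same computation with an illustrative 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE 0x1000ULL
    #define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

    /* If addr + len would cross a page boundary, shrink len to the number of
     * bytes left on the current page. */
    static uint64_t adj_len_to_page_demo(uint64_t len, uint64_t addr)
    {
        if ((addr & ~DEMO_PAGE_MASK) + len - 1 >= DEMO_PAGE_SIZE) {
            return -addr & ~DEMO_PAGE_MASK;
        }
        return len;
    }

    int main(void)
    {
        /* 0x2f80..0x3080 crosses a boundary: clamp to the 0x80 bytes up to 0x3000. */
        printf("0x%llx\n", (unsigned long long)adj_len_to_page_demo(0x100, 0x2f80));
        /* 0x2f80..0x2fc0 fits within the page: length is unchanged. */
        printf("0x%llx\n", (unsigned long long)adj_len_to_page_demo(0x040, 0x2f80));
        return 0;
    }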
@@ -143,19 +147,11 @@ uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
        return 0;
    }
#else
    if (src == dest) {
        memset(g2h(dest), 0, l + 1);
        fast_memset(env, dest, 0, l + 1);
        return 0;
    }
#endif

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);

@@ -191,45 +187,25 @@ uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;
    int x = 0;
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    if ((l > 32) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
            return;
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
            return;
        }
    }
#else
    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
        return;
    /* mvc and memmove do not behave the same when areas overlap! */
    } else if ((dest < src) || (src + l < dest)) {
        memmove(g2h(dest), g2h(src), l + 1);
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }
#endif

    /* handle the parts that fit into 8-byte loads/stores */
    if ((dest + 8 <= src) || (src + 8 <= dest)) {
        for (i = 0; i < l_64; i++) {
            cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
            x += 8;
        }
    /* mvc and memmove do not behave the same when areas overlap! */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = x; i <= l; i++) {
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
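The special-casing in MVC exists because the instruction copies strictly byte by byte from left to right: with dest == src + 1 every store feeds the next load and the first byte is propagated (memset semantics), which plain memmove() would not reproduce. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Byte-by-byte, left-to-right copy — the architectural MVC behaviour. */
    static void mvc_like(uint8_t *dest, const uint8_t *src, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            dest[i] = src[i];
        }
    }

    int main(void)
    {
        uint8_t a[8] = { 'X', 1, 2, 3, 4, 5, 6, 7 };
        uint8_t b[8] = { 'X', 1, 2, 3, 4, 5, 6, 7 };

        mvc_like(a + 1, a, 7);   /* dest == src + 1: propagates 'X' everywhere */
        memmove(b + 1, b, 7);    /* keeps the original bytes */

        printf("mvc-like: %c%c%c%c\n", a[1], a[2], a[3], a[4]);  /* XXXX */
        printf("memmove : %c%d%d%d\n", b[1], b[2], b[3], b[4]);  /* X123 */
        return 0;
    }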
@@ -396,11 +372,7 @@ void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
#ifdef CONFIG_USER_ONLY
    memmove(g2h(r1), g2h(r2), TARGET_PAGE_SIZE);
#else
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
#endif
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}

/* string copy (c is string terminator) */

@@ -869,11 +841,17 @@ uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = cpu_ldq_data(env, src);
        val = cpu_ldq_data(env, src);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

@@ -883,18 +861,27 @@ void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
            cpu_ldl_data(env, src);
        val = cpu_ldl_data(env, src);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {

@@ -902,6 +889,10 @@ void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}
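lctl/lctlg now load each control register through a temporary so they can notice whether CR9-CR11 (the PER controls) actually changed, and only then pay for a watchpoint recomputation. A sketch of that detect-then-act-once pattern; the printout stands in for the recompute call:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Load new control-register values, remember whether any of regs 9..11
     * changed, and trigger the (expensive) follow-up work at most once. */
    static void load_cregs(uint64_t cregs[16], const uint64_t new_vals[16])
    {
        bool per_regs_changed = false;

        for (int i = 0; i < 16; i++) {
            if (cregs[i] != new_vals[i] && i >= 9 && i <= 11) {
                per_regs_changed = true;
            }
            cregs[i] = new_vals[i];
        }
        if (per_regs_changed) {
            printf("recompute watchpoints\n");
        }
    }

    int main(void)
    {
        uint64_t cregs[16] = { 0 };
        uint64_t same[16] = { 0 };
        uint64_t new_per[16] = { 0 };

        new_per[10] = 0x1000;        /* PER starting address changed */
        load_cregs(cregs, same);     /* prints nothing */
        load_cregs(cregs, new_per);  /* prints once */
        return 0;
    }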
@@ -1114,6 +1105,14 @@ void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)

@@ -1121,6 +1120,14 @@ void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

/* load real address */

@@ -205,9 +205,21 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
    switch (subcode) {
    case 0:
        modified_clear_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 1:
        load_normal_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 3:
        s390_reipl_request();
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 5:
        if ((r1 & 1) || (addr & 0x0fffULL)) {

@@ -254,9 +266,7 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
    }
#endif

/* DIAG */
uint64_t HELPER(diag)(CPUS390XState *env, uint32_t num, uint64_t mem,
                      uint64_t code)
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

@@ -271,6 +281,7 @@ uint64_t HELPER(diag)(CPUS390XState *env, uint32_t num, uint64_t mem,
        break;
    case 0x308:
        /* ipl */
        handle_diag_308(env, r1, r3);
        r = 0;
        break;
    default:

@@ -281,8 +292,6 @@ uint64_t HELPER(diag)(CPUS390XState *env, uint32_t num, uint64_t mem,
    if (r) {
        program_interrupt(env, PGM_OPERATION, ILEN_LATER_INC);
    }

    return r;
}

/* Set Prefix */

@@ -523,3 +532,111 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_xsch(cpu, r1);
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_csch(cpu, r1);
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_hsch(cpu, r1);
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_msch(cpu, r1, inst >> 16);
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_rchp(cpu, r1);
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_rsch(cpu, r1);
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_ssch(cpu, r1, inst >> 16);
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_stsch(cpu, r1, inst >> 16);
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_tsch(cpu, r1, inst >> 16);
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    ioinst_handle_chsc(cpu, inst >> 16);
}
#endif

#ifndef CONFIG_USER_ONLY
void HELPER(per_check_exception)(CPUS390XState *env)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, env->per_address));

        cs->exception_index = EXCP_PGM;
        cpu_loop_exit(cs);
    }
}

void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = CPU(s390_env_get_cpu(env));

            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}
#endif
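per_branch() only records a successful-branching event when CR9 enables it and, if the branch-address control is also set, when the branch target falls inside the CR10-CR11 range; on a hit the helper latches the branch origin plus the ATMID bits. The decision itself, restated as a standalone predicate with renamed demo constants:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_EVENT_BRANCH        0x80000000u
    #define DEMO_CONTROL_BRANCH_ADDR 0x00800000u

    /* Range test mirroring get_per_in_range(). */
    static bool in_range(uint64_t start, uint64_t end, uint64_t addr)
    {
        return start <= end ? (start <= addr && addr <= end)
                            : (start <= addr || addr <= end);
    }

    /* Should a successful-branching PER event be raised for this target? */
    static bool branch_event(uint32_t cr9, uint64_t cr10, uint64_t cr11, uint64_t to)
    {
        if (!(cr9 & DEMO_EVENT_BRANCH)) {
            return false;
        }
        return !(cr9 & DEMO_CONTROL_BRANCH_ADDR) || in_range(cr10, cr11, to);
    }

    int main(void)
    {
        /* Events enabled for any branch target. */
        printf("%d\n", branch_event(DEMO_EVENT_BRANCH, 0, 0, 0x4000));   /* 1 */
        /* Events restricted to targets inside [0x1000, 0x2000]. */
        printf("%d\n", branch_event(DEMO_EVENT_BRANCH | DEMO_CONTROL_BRANCH_ADDR,
                                    0x1000, 0x2000, 0x4000));            /* 0 */
        return 0;
    }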
@@ -150,6 +150,7 @@ void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;

@@ -173,6 +174,9 @@ void s390x_translate_init(void)
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(TCG_AREG0,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");

@@ -249,6 +253,46 @@ static void update_psw_addr(DisasContext *s)
    tcg_gen_movi_i64(psw_addr, s->pc);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
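In the non-PER else-path of per_branch_cond(), tcg_gen_movcond_i64() keeps the Breaking-Event-Address register current without emitting a branch: it is a select, so gbea takes the instruction address only when the condition (the branch) holds. The same operation in plain C:

    #include <stdint.h>

    /* Branchless update of the breaking-event address: keep the old value
     * unless the branch is taken. */
    static uint64_t update_gbea(int branch_taken, uint64_t gbea, uint64_t pc)
    {
        return branch_taken ? pc : gbea;
    }

    int main(void)
    {
        uint64_t gbea = 0x1000;
        gbea = update_gbea(0, gbea, 0x2000);   /* not taken: stays 0x1000 */
        gbea = update_gbea(1, gbea, 0x2004);   /* taken: becomes 0x2004 */
        return gbea == 0x2004 ? 0 : 1;
    }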
@@ -568,7 +612,8 @@ static int use_goto_tb(DisasContext *s, uint64_t dest)
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
            && !(s->tb->cflags & CF_LAST_IO)
            && !(s->tb->flags & FLAG_MASK_PER));
}

static void account_noninline_branch(DisasContext *s, int cc_op)

@@ -1001,6 +1046,7 @@ enum DisasFieldIndexC {
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;

@@ -1181,16 +1227,19 @@ static void help_l2_shift(DisasContext *s, DisasFields *f,
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}

@@ -1210,6 +1259,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }

@@ -1225,6 +1275,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }

@@ -1249,6 +1300,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

@@ -1280,6 +1332,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {

@@ -1295,6 +1348,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();

@@ -1303,6 +1357,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

@@ -1435,6 +1490,7 @@ static ExitStatus op_bas(DisasContext *s, DisasOps *o)
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;

@@ -2025,15 +2081,19 @@ static ExitStatus op_ct(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    potential_page_fault(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif

@@ -2505,6 +2565,7 @@ static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

@@ -2524,6 +2585,7 @@ static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

@@ -3584,11 +3646,93 @@ static ExitStatus op_spx(DisasContext *s, DisasOps *o)
    return NO_EXIT;
}

static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

@@ -4839,6 +4983,14 @@ static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns. This is formulated as a switch

@@ -5015,6 +5167,7 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

@@ -5051,6 +5204,14 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

@@ -5138,6 +5299,21 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
|
@ -1431,12 +1431,22 @@ void tb_check_watchpoint(CPUState *cpu)
|
||||
TranslationBlock *tb;
|
||||
|
||||
tb = tb_find_pc(cpu->mem_io_pc);
|
||||
if (!tb) {
|
||||
cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
|
||||
(void *)cpu->mem_io_pc);
|
||||
if (tb) {
|
||||
/* We can use retranslation to find the PC. */
|
||||
cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
|
||||
tb_phys_invalidate(tb, -1);
|
||||
} else {
|
||||
/* The exception probably happened in a helper. The CPU state should
|
||||
have been saved before calling it. Fetch the PC from there. */
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
target_ulong pc, cs_base;
|
||||
tb_page_addr_t addr;
|
||||
int flags;
|
||||
|
||||
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
|
||||
addr = get_page_addr_code(env, pc);
|
||||
tb_invalidate_phys_range(addr, addr + 1);
|
||||
}
|
||||
cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
|
||||
tb_phys_invalidate(tb, -1);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
|
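The translate-all change keeps the precise path — restore state from the matching translation block and invalidate just that TB — and adds a fallback for watchpoints hit inside helpers, where no TB matches the host PC: the CPU state saved before the helper call supplies the guest PC, and the whole code page is invalidated instead. A rough shape of that decision, with purely illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    struct demo_tb { uint64_t pc; };

    /* Prefer precise invalidation when a TB is known; otherwise fall back to
     * the coarser page-level invalidation based on helper-saved state. */
    static void handle_watchpoint(struct demo_tb *tb, uint64_t saved_pc)
    {
        if (tb) {
            printf("invalidate TB at 0x%llx\n", (unsigned long long)tb->pc);
        } else {
            uint64_t page = saved_pc & ~0xfffULL;
            printf("invalidate code page at 0x%llx\n", (unsigned long long)page);
        }
    }

    int main(void)
    {
        struct demo_tb tb = { 0x401000 };
        handle_watchpoint(&tb, 0);          /* precise: retranslation possible */
        handle_watchpoint(NULL, 0x50234a);  /* fallback: helper-saved state */
        return 0;
    }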