From 2d1df8591022737b8ef19d681ff74eda389f5198 Mon Sep 17 00:00:00 2001 From: Fam Zheng Date: Mon, 10 Sep 2018 22:56:15 +0800 Subject: [PATCH 01/80] virtio: Return true from virtio_queue_empty if broken Both virtio-blk and virtio-scsi use virtio_queue_empty() as the loop condition in VQ handlers (virtio_blk_handle_vq, virtio_scsi_handle_cmd_vq). When a device is marked broken in virtqueue_pop, for example if a vIOMMU address translation failed, we want to break out of the loop. This fixes a hanging problem when booting a CentOS 3.10.0-862.el7.x86_64 kernel with ATS enabled: $ qemu-system-x86_64 \ ... \ -device intel-iommu,intremap=on,caching-mode=on,eim=on,device-iotlb=on \ -device virtio-scsi-pci,iommu_platform=on,ats=on,id=scsi0,bus=pci.4,addr=0x0 The infinite loop happens immediately when the kernel boots and initializes the device, where virtio_scsi_data_plane_handle_cmd will not return:
> ...
> #13 0x00005586602b7793 in virtio_scsi_handle_cmd_vq
> #14 0x00005586602b8d66 in virtio_scsi_data_plane_handle_cmd
> #15 0x00005586602ddab7 in virtio_queue_notify_aio_vq
> #16 0x00005586602dfc9f in virtio_queue_host_notifier_aio_poll
> #17 0x00005586607885da in run_poll_handlers_once
> #18 0x000055866078880e in try_poll_mode
> #19 0x00005586607888eb in aio_poll
> #20 0x0000558660784561 in aio_wait_bh_oneshot
> #21 0x00005586602b9582 in virtio_scsi_dataplane_stop
> #22 0x00005586605a7110 in virtio_bus_stop_ioeventfd
> #23 0x00005586605a9426 in virtio_pci_stop_ioeventfd
> #24 0x00005586605ab808 in virtio_pci_common_write
> #25 0x0000558660242396 in memory_region_write_accessor
> #26 0x00005586602425ab in access_with_adjusted_size
> #27 0x0000558660245281 in memory_region_dispatch_write
> #28 0x00005586601e008e in flatview_write_continue
> #29 0x00005586601e01d8 in flatview_write
> #30 0x00005586601e04de in address_space_write
> #31 0x00005586601e052f in address_space_rw
> #32 0x00005586602607f2 in kvm_cpu_exec
> #33 0x0000558660227148 in qemu_kvm_cpu_thread_fn
> #34 0x000055866078bde7 in qemu_thread_start
> #35 0x00007f5784906594 in start_thread
> #36 0x00007f5784639e6f in clone
With this patch, virtio_queue_empty will now return 1 as soon as the vdev is marked as broken, after a "virtio: zero sized buffers are not allowed" error. To be consistent, update virtio_queue_empty_rcu as well. Signed-off-by: Fam Zheng Message-Id: <20180910145616.8598-2-famz@redhat.com> Signed-off-by: Paolo Bonzini --- hw/virtio/virtio.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index f6a588ab57..94f5c8e52a 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -358,6 +358,10 @@ int virtio_queue_ready(VirtQueue *vq) * Called within rcu_read_lock(). */ static int virtio_queue_empty_rcu(VirtQueue *vq) { + if (unlikely(vq->vdev->broken)) { + return 1; + } + if (unlikely(!vq->vring.avail)) { return 1; } @@ -373,6 +377,10 @@ int virtio_queue_empty(VirtQueue *vq) { bool empty; + if (unlikely(vq->vdev->broken)) { + return 1; + } + if (unlikely(!vq->vring.avail)) { return 1; } From 07d66672e7035dd24dbe8ee009847a8ceae1178d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 24 Aug 2018 17:08:09 +0200 Subject: [PATCH 02/80] qsp: hide indirect function calls from Coverity Coverity no longer sees that qemu_mutex_lock is taking a lock. Hide all the QSP magic so that static analysis works again.
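As an illustration of the pattern (a standalone sketch, not QEMU code; all names here are invented for the example): an analyzer can follow a direct call, but a call through a function pointer is opaque to it, so the macro expands to the direct form when __COVERITY__ is defined.

    #include <stdio.h>

    static void real_lock(int *m) { *m = 1; }

    typedef void (*lock_fn)(int *);
    static lock_fn lock_func = real_lock;

    #ifdef __COVERITY__
    /* direct call: the analyzer sees which function takes the lock */
    #define do_lock(m) real_lock(m)
    #else
    /* indirect call through a function pointer: opaque to the analyzer */
    #define do_lock(m) lock_func(m)
    #endif

    int main(void)
    {
        int m = 0;
        do_lock(&m);
        printf("locked: %d\n", m);
        return 0;
    }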
Signed-off-by: Paolo Bonzini --- include/qemu/thread.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/include/qemu/thread.h b/include/qemu/thread.h index dacebcfff0..b2661b6672 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -48,6 +48,22 @@ extern QemuCondWaitFunc qemu_cond_wait_func; #define qemu_mutex_trylock__raw(m) \ qemu_mutex_trylock_impl(m, __FILE__, __LINE__) +#ifdef __COVERITY__ +/* + * Coverity is severely confused by the indirect function calls, + * hide them. + */ +#define qemu_mutex_lock(m) \ + qemu_mutex_lock_impl(m, __FILE__, __LINE__); +#define qemu_mutex_trylock(m) \ + qemu_mutex_trylock_impl(m, __FILE__, __LINE__); +#define qemu_rec_mutex_lock(m) \ + qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__); +#define qemu_rec_mutex_trylock(m) \ + qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__); +#define qemu_cond_wait(c, m) \ + qemu_cond_wait_impl(c, m, __FILE__, __LINE__); +#else #define qemu_mutex_lock(m) ({ \ QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \ _f(m, __FILE__, __LINE__); \ }) @@ -73,6 +89,7 @@ extern QemuCondWaitFunc qemu_cond_wait_func; QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \ _f(c, m, __FILE__, __LINE__); \ }) +#endif #define qemu_mutex_unlock(mutex) \ qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__) From cf9270e5220671f49cc238deaf6136669cc07ae1 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 24 Aug 2018 17:03:41 +0200 Subject: [PATCH 03/80] es1370: fix ADC_FRAMEADR and ADC_FRAMECNT They are not consecutive with DAC1_FRAME* and DAC2_FRAME*. Fixes: 154c1d1f960c5147a3f8ef00907504112f271cd8 Signed-off-by: Paolo Bonzini --- hw/audio/es1370.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index dd75c9e8f5..4f980a598b 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -506,10 +506,13 @@ static void es1370_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) d - &s->chan[0], val >> 16, (val & 0xffff)); break; + case ES1370_REG_ADC_FRAMEADR: + d += 2; + goto frameadr; case ES1370_REG_DAC1_FRAMEADR: case ES1370_REG_DAC2_FRAMEADR: - case ES1370_REG_ADC_FRAMEADR: d += (addr - ES1370_REG_DAC1_FRAMEADR) >> 3; + frameadr: d->frame_addr = val; ldebug ("chan %td frame address %#x\n", d - &s->chan[0], val); break; @@ -521,10 +524,13 @@ static void es1370_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) lwarn ("writing to phantom frame address %#x\n", val); break; + case ES1370_REG_ADC_FRAMECNT: + d += 2; + goto framecnt; case ES1370_REG_DAC1_FRAMECNT: case ES1370_REG_DAC2_FRAMECNT: - case ES1370_REG_ADC_FRAMECNT: d += (addr - ES1370_REG_DAC1_FRAMECNT) >> 3; + framecnt: d->frame_cnt = val; d->leftover = 0; ldebug ("chan %td frame count %d, buffer size %d\n", From a1f2ed2ad8b4ec28a5f1bc4fcb50ef57b69a28a2 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Fri, 11 May 2018 11:16:01 +0300 Subject: [PATCH 04/80] ps2: prevent changing irq state on save and load Commit 2858ab09e6f708e381fc1a1cc87e747a690c4884 changed PS/2 keyboard/mouse buffers to the standard size. However, the queue state may change when migrating from the old buffer size, and therefore the irq needs updating. But that change was implemented incorrectly: it throws away the whole queue if there is too much data, instead of cropping it. That commit also updates the irq (because the queue state may change). But updating the irq may change the VM state (and the determinism of the execution).
E.g., when replaying the execution, one may save the VM state and the state of the interrupt controller will be updated at the moment of saving, instead of using the recorded update events. This patch makes the queue update deterministic: it removes the update_irq call and crops the queue to prevent losing the characters and changing the required irq status. Signed-off-by: Pavel Dovgalyuk Message-Id: <20180511081601.14610.39946.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- hw/input/ps2.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hw/input/ps2.c b/hw/input/ps2.c index fdfcadf9a1..6c43fc2912 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -914,7 +914,12 @@ static void ps2_common_post_load(PS2State *s) uint8_t tmp_data[PS2_QUEUE_SIZE]; /* set the useful data buffer queue size, < PS2_QUEUE_SIZE */ - size = (q->count < 0 || q->count > PS2_QUEUE_SIZE) ? 0 : q->count; + size = q->count; + if (q->count < 0) { + size = 0; + } else if (q->count > PS2_QUEUE_SIZE) { + size = PS2_QUEUE_SIZE; + } /* move the queue elements to the start of data array */ for (i = 0; i < size; i++) { @@ -929,7 +934,6 @@ static void ps2_common_post_load(PS2State *s) q->rptr = 0; q->wptr = (size == PS2_QUEUE_SIZE) ? 0 : size; q->count = size; - s->update_irq(s->update_arg, q->count != 0); } static void ps2_kbd_reset(void *opaque) From 119c440c3c599778bfb4f90d8e39fda132925813 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 3 Sep 2018 13:18:28 -0400 Subject: [PATCH 05/80] atomic: fix comment s/x64_64/x86_64/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Emilio G. Cota Message-Id: <20180903171831.15446-4-cota@braap.org> Reviewed-by: Alex Bennée Signed-off-by: Paolo Bonzini --- include/qemu/atomic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index 9ed39effd3..de3e36f400 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -98,7 +98,7 @@ * We'd prefer not want to pull in everything else TCG related, so handle * those few cases by hand. * - * Note that x32 is fully detected with __x64_64__ + _ILP32, and that for + * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for * Sparc we always force the use of sparcv9 in configure. */ #if defined(__x86_64__) || defined(__sparc__) From 87a09cdc529ddb5daa660e3c442b793032a2169d Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 3 Sep 2018 13:18:29 -0400 Subject: [PATCH 06/80] cpus: initialize timers_state.vm_clock_lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We forgot to initialize the spinlock introduced in 94377115b2 ("cpus: protect TimerState writes with a spinlock", 2018-08-23). Fix it. Signed-off-by: Emilio G. Cota Message-Id: <20180903171831.15446-5-cota@braap.org> Reviewed-by: Alex Bennée Signed-off-by: Paolo Bonzini --- cpus.c | 1 + 1 file changed, 1 insertion(+) diff --git a/cpus.c b/cpus.c index 719788320f..4abc3b3dda 100644 --- a/cpus.c +++ b/cpus.c @@ -823,6 +823,7 @@ int cpu_throttle_get_percentage(void) void cpu_ticks_init(void) { seqlock_init(&timers_state.vm_clock_seqlock); + qemu_spin_init(&timers_state.vm_clock_lock); vmstate_register(NULL, 0, &vmstate_timers, &timers_state); throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, cpu_throttle_timer_tick, NULL); From 5fe21034292a2758639d6e66822a53770c7bcc0d Mon Sep 17 00:00:00 2001 From: "Emilio G. 
Cota" Date: Mon, 10 Sep 2018 19:27:41 -0400 Subject: [PATCH 07/80] cacheinfo: add i/d cache_linesize_log Signed-off-by: Emilio G. Cota Message-Id: <20180910232752.31565-2-cota@braap.org> Signed-off-by: Paolo Bonzini --- include/qemu/osdep.h | 2 ++ util/cacheinfo.c | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index a91068df0e..a746a5e531 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -570,6 +570,8 @@ extern uintptr_t qemu_real_host_page_size; extern intptr_t qemu_real_host_page_mask; extern int qemu_icache_linesize; +extern int qemu_icache_linesize_log; extern int qemu_dcache_linesize; +extern int qemu_dcache_linesize_log; #endif diff --git a/util/cacheinfo.c b/util/cacheinfo.c index db5172d07c..6c8fe797bf 100644 --- a/util/cacheinfo.c +++ b/util/cacheinfo.c @@ -7,9 +7,12 @@ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" int qemu_icache_linesize = 0; +int qemu_icache_linesize_log; int qemu_dcache_linesize = 0; +int qemu_dcache_linesize_log; /* * Operating system specific detection mechanisms. @@ -172,6 +175,11 @@ static void __attribute__((constructor)) init_cache_info(void) arch_cache_info(&isize, &dsize); fallback_cache_info(&isize, &dsize); + assert((isize & (isize - 1)) == 0); + assert((dsize & (dsize - 1)) == 0); + qemu_icache_linesize = isize; + qemu_icache_linesize_log = ctz32(isize); qemu_dcache_linesize = dsize; + qemu_dcache_linesize_log = ctz32(dsize); } From 782da5b2921c4d18777d5d5bd9385b9f7beae360 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 10 Sep 2018 19:27:42 -0400 Subject: [PATCH 08/80] util: add atomic64 This introduces read/set accessors for int64_t and uint64_t. Signed-off-by: Emilio G. Cota Message-Id: <20180910232752.31565-3-cota@braap.org> Signed-off-by: Paolo Bonzini --- include/qemu/atomic.h | 34 ++++++++++++++++++ util/Makefile.objs | 1 + util/atomic64.c | 83 +++++++++++++++++++++++++++++++++++++++++++ util/cacheinfo.c | 3 ++ 4 files changed, 121 insertions(+) create mode 100644 util/atomic64.c diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index de3e36f400..f6993a8fb1 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -450,4 +450,38 @@ _oldn; \ }) +/* Abstractions to access atomically (i.e. 
"once") i64/u64 variables */ +#ifdef CONFIG_ATOMIC64 +static inline int64_t atomic_read_i64(const int64_t *ptr) +{ + /* use __nocheck because sizeof(void *) might be < sizeof(u64) */ + return atomic_read__nocheck(ptr); +} + +static inline uint64_t atomic_read_u64(const uint64_t *ptr) +{ + return atomic_read__nocheck(ptr); +} + +static inline void atomic_set_i64(int64_t *ptr, int64_t val) +{ + atomic_set__nocheck(ptr, val); +} + +static inline void atomic_set_u64(uint64_t *ptr, uint64_t val) +{ + atomic_set__nocheck(ptr, val); +} + +static inline void atomic64_init(void) +{ +} +#else /* !CONFIG_ATOMIC64 */ +int64_t atomic_read_i64(const int64_t *ptr); +uint64_t atomic_read_u64(const uint64_t *ptr); +void atomic_set_i64(int64_t *ptr, int64_t val); +void atomic_set_u64(uint64_t *ptr, uint64_t val); +void atomic64_init(void); +#endif /* !CONFIG_ATOMIC64 */ + #endif /* QEMU_ATOMIC_H */ diff --git a/util/Makefile.objs b/util/Makefile.objs index 0e88899011..0820923c18 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -3,6 +3,7 @@ util-obj-y += bufferiszero.o util-obj-y += lockcnt.o util-obj-y += aiocb.o async.o aio-wait.o thread-pool.o qemu-timer.o util-obj-y += main-loop.o iohandler.o +util-obj-$(call lnot,$(CONFIG_ATOMIC64)) += atomic64.o util-obj-$(CONFIG_POSIX) += aio-posix.o util-obj-$(CONFIG_POSIX) += compatfd.o util-obj-$(CONFIG_POSIX) += event_notifier-posix.o diff --git a/util/atomic64.c b/util/atomic64.c new file mode 100644 index 0000000000..b198a6c9c8 --- /dev/null +++ b/util/atomic64.c @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2018, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu/atomic.h" +#include "qemu/thread.h" + +#ifdef CONFIG_ATOMIC64 +#error This file must only be compiled if !CONFIG_ATOMIC64 +#endif + +/* + * When !CONFIG_ATOMIC64, we serialize both reads and writes with spinlocks. + * We use an array of spinlocks, with padding computed at run-time based on + * the host's dcache line size. + * We point to the array with a void * to simplify the padding's computation. + * Each spinlock is located every lock_size bytes. + */ +static void *lock_array; +static size_t lock_size; + +/* + * Systems without CONFIG_ATOMIC64 are unlikely to have many cores, so we use a + * small array of locks. 
+ */ +#define NR_LOCKS 16 + +static QemuSpin *addr_to_lock(const void *addr) +{ + uintptr_t a = (uintptr_t)addr; + uintptr_t idx; + + idx = a >> qemu_dcache_linesize_log; + idx ^= (idx >> 8) ^ (idx >> 16); + idx &= NR_LOCKS - 1; + return lock_array + idx * lock_size; +} + +#define GEN_READ(name, type) \ + type name(const type *ptr) \ + { \ + QemuSpin *lock = addr_to_lock(ptr); \ + type ret; \ + \ + qemu_spin_lock(lock); \ + ret = *ptr; \ + qemu_spin_unlock(lock); \ + return ret; \ + } + +GEN_READ(atomic_read_i64, int64_t) +GEN_READ(atomic_read_u64, uint64_t) +#undef GEN_READ + +#define GEN_SET(name, type) \ + void name(type *ptr, type val) \ + { \ + QemuSpin *lock = addr_to_lock(ptr); \ + \ + qemu_spin_lock(lock); \ + *ptr = val; \ + qemu_spin_unlock(lock); \ + } + +GEN_SET(atomic_set_i64, int64_t) +GEN_SET(atomic_set_u64, uint64_t) +#undef GEN_SET + +void atomic64_init(void) +{ + int i; + + lock_size = ROUND_UP(sizeof(QemuSpin), qemu_dcache_linesize); + lock_array = qemu_memalign(qemu_dcache_linesize, lock_size * NR_LOCKS); + for (i = 0; i < NR_LOCKS; i++) { + QemuSpin *lock = lock_array + i * lock_size; + + qemu_spin_init(lock); + } +} diff --git a/util/cacheinfo.c b/util/cacheinfo.c index 6c8fe797bf..3cd080b83d 100644 --- a/util/cacheinfo.c +++ b/util/cacheinfo.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qemu/host-utils.h" +#include "qemu/atomic.h" int qemu_icache_linesize = 0; int qemu_icache_linesize_log; @@ -182,4 +183,6 @@ static void __attribute__((constructor)) init_cache_info(void) qemu_icache_linesize_log = ctz32(isize); qemu_dcache_linesize = dsize; qemu_dcache_linesize_log = ctz32(dsize); + + atomic64_init(); } From 82fdfcbe64ec09048d5f9b430b16231b9285d54b Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 10 Sep 2018 19:27:43 -0400 Subject: [PATCH 09/80] tests: add atomic64-bench - With CONFIG_ATOMIC64: $ tests/atomic64-bench -n 1 Throughput: 310.40 Mops/s - Without: $ tests/atomic64-bench -n 1 Throughput: 149.08 Mops/s Signed-off-by: Emilio G. 
Cota Message-Id: <20180910232752.31565-4-cota@braap.org> Signed-off-by: Paolo Bonzini --- tests/Makefile.include | 3 +- tests/atomic64-bench.c | 171 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 173 insertions(+), 1 deletion(-) create mode 100644 tests/atomic64-bench.c diff --git a/tests/Makefile.include b/tests/Makefile.include index d0c0a92e67..175d013f4a 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -613,7 +613,7 @@ test-obj-y = tests/check-qnum.o tests/check-qstring.o tests/check-qdict.o \ tests/test-rcu-tailq.o \ tests/test-qdist.o tests/test-shift128.o \ tests/test-qht.o tests/qht-bench.o tests/test-qht-par.o \ - tests/atomic_add-bench.o + tests/atomic_add-bench.o tests/atomic64-bench.o $(test-obj-y): QEMU_INCLUDES += -Itests QEMU_CFLAGS += -I$(SRC_PATH)/tests @@ -668,6 +668,7 @@ tests/test-qht-par$(EXESUF): tests/test-qht-par.o tests/qht-bench$(EXESUF) $(tes tests/qht-bench$(EXESUF): tests/qht-bench.o $(test-util-obj-y) tests/test-bufferiszero$(EXESUF): tests/test-bufferiszero.o $(test-util-obj-y) tests/atomic_add-bench$(EXESUF): tests/atomic_add-bench.o $(test-util-obj-y) +tests/atomic64-bench$(EXESUF): tests/atomic64-bench.o $(test-util-obj-y) tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \ hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\ diff --git a/tests/atomic64-bench.c b/tests/atomic64-bench.c new file mode 100644 index 0000000000..71692560ed --- /dev/null +++ b/tests/atomic64-bench.c @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2018, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu/thread.h" +#include "qemu/host-utils.h" +#include "qemu/processor.h" + +struct thread_info { + uint64_t r; + uint64_t accesses; +} QEMU_ALIGNED(64); + +struct count { + int64_t i64; +} QEMU_ALIGNED(64); + +static QemuThread *threads; +static struct thread_info *th_info; +static unsigned int n_threads = 1; +static unsigned int n_ready_threads; +static struct count *counts; +static unsigned int duration = 1; +static unsigned int range = 1024; +static bool test_start; +static bool test_stop; + +static const char commands_string[] = + " -d = duration in seconds\n" + " -n = number of threads\n" + " -r = range (will be rounded up to pow2)"; + +static void usage_complete(char *argv[]) +{ + fprintf(stderr, "Usage: %s [options]\n", argv[0]); + fprintf(stderr, "options:\n%s\n", commands_string); +} + +/* + * From: https://en.wikipedia.org/wiki/Xorshift + * This is faster than rand_r(), and gives us a wider range (RAND_MAX is only + * guaranteed to be >= INT_MAX). 
+ */ +static uint64_t xorshift64star(uint64_t x) +{ + x ^= x >> 12; /* a */ + x ^= x << 25; /* b */ + x ^= x >> 27; /* c */ + return x * UINT64_C(2685821657736338717); +} + +static void *thread_func(void *arg) +{ + struct thread_info *info = arg; + + atomic_inc(&n_ready_threads); + while (!atomic_read(&test_start)) { + cpu_relax(); + } + + while (!atomic_read(&test_stop)) { + unsigned int index; + + info->r = xorshift64star(info->r); + index = info->r & (range - 1); + atomic_read_i64(&counts[index].i64); + info->accesses++; + } + return NULL; +} + +static void run_test(void) +{ + unsigned int remaining; + unsigned int i; + + while (atomic_read(&n_ready_threads) != n_threads) { + cpu_relax(); + } + atomic_set(&test_start, true); + do { + remaining = sleep(duration); + } while (remaining); + atomic_set(&test_stop, true); + + for (i = 0; i < n_threads; i++) { + qemu_thread_join(&threads[i]); + } +} + +static void create_threads(void) +{ + unsigned int i; + + threads = g_new(QemuThread, n_threads); + th_info = g_new(struct thread_info, n_threads); + counts = g_malloc0_n(range, sizeof(*counts)); + + for (i = 0; i < n_threads; i++) { + struct thread_info *info = &th_info[i]; + + info->r = (i + 1) ^ time(NULL); + info->accesses = 0; + qemu_thread_create(&threads[i], NULL, thread_func, info, + QEMU_THREAD_JOINABLE); + } +} + +static void pr_params(void) +{ + printf("Parameters:\n"); + printf(" # of threads: %u\n", n_threads); + printf(" duration: %u\n", duration); + printf(" ops' range: %u\n", range); +} + +static void pr_stats(void) +{ + unsigned long long val = 0; + double tx; + int i; + + for (i = 0; i < n_threads; i++) { + val += th_info[i].accesses; + } + tx = val / duration / 1e6; + + printf("Results:\n"); + printf("Duration: %u s\n", duration); + printf(" Throughput: %.2f Mops/s\n", tx); + printf(" Throughput/thread: %.2f Mops/s/thread\n", tx / n_threads); +} + +static void parse_args(int argc, char *argv[]) +{ + int c; + + for (;;) { + c = getopt(argc, argv, "hd:n:r:"); + if (c < 0) { + break; + } + switch (c) { + case 'h': + usage_complete(argv); + exit(0); + case 'd': + duration = atoi(optarg); + break; + case 'n': + n_threads = atoi(optarg); + break; + case 'r': + range = pow2ceil(atoi(optarg)); + break; + } + } +} + +int main(int argc, char *argv[]) +{ + parse_args(argc, argv); + pr_params(); + create_threads(); + run_test(); + pr_stats(); + return 0; +} From ac8c77486cbdd5d7be17c447ca64cbebd330f25a Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 10 Sep 2018 19:27:44 -0400 Subject: [PATCH 10/80] qsp: use atomic64 accessors With the seqlock, we either have to use atomics to remain within defined behaviour (and note that 64-bit atomics aren't always guaranteed to compile, irrespective of __nocheck), or drop the atomics and be in undefined behaviour territory. Fix it by dropping the seqlock and using atomic64 accessors. This will limit scalability when !CONFIG_ATOMIC64, but those machines (1) don't have many users and (2) are unlikely to have many cores. - With CONFIG_ATOMIC64: $ tests/atomic_add-bench -n 1 -m -p Throughput: 13.00 Mops/s - Forcing !CONFIG_ATOMIC64: $ tests/atomic_add-bench -n 1 -m -p Throughput: 10.89 Mops/s Signed-off-by: Emilio G. 
Cota Message-Id: <20180910232752.31565-5-cota@braap.org> Signed-off-by: Paolo Bonzini --- util/qsp.c | 49 ++++++++----------------------------------------- 1 file changed, 8 insertions(+), 41 deletions(-) diff --git a/util/qsp.c b/util/qsp.c index 2de3a97594..a848b09c6d 100644 --- a/util/qsp.c +++ b/util/qsp.c @@ -84,13 +84,6 @@ struct QSPEntry { uint64_t n_acqs; uint64_t ns; unsigned int n_objs; /* count of coalesced objs; only used for reporting */ -#ifndef CONFIG_ATOMIC64 - /* - * If we cannot update the counts atomically, then use a seqlock. - * We don't need an associated lock because the updates are thread-local. - */ - QemuSeqLock sequence; -#endif }; typedef struct QSPEntry QSPEntry; @@ -344,47 +337,16 @@ static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line, return qsp_entry_find(&qsp_ht, &orig, hash); } -/* - * @from is in the global hash table; read it atomically if the host - * supports it, otherwise use the seqlock. - */ -static void qsp_entry_aggregate(QSPEntry *to, const QSPEntry *from) -{ -#ifdef CONFIG_ATOMIC64 - to->ns += atomic_read__nocheck(&from->ns); - to->n_acqs += atomic_read__nocheck(&from->n_acqs); -#else - unsigned int version; - uint64_t ns, n_acqs; - - do { - version = seqlock_read_begin(&from->sequence); - ns = atomic_read__nocheck(&from->ns); - n_acqs = atomic_read__nocheck(&from->n_acqs); - } while (seqlock_read_retry(&from->sequence, version)); - - to->ns += ns; - to->n_acqs += n_acqs; -#endif -} - /* * @e is in the global hash table; it is only written to by the current thread, * so we write to it atomically (as in "write once") to prevent torn reads. - * If the host doesn't support u64 atomics, use the seqlock. */ static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq) { -#ifndef CONFIG_ATOMIC64 - seqlock_write_begin(&e->sequence); -#endif - atomic_set__nocheck(&e->ns, e->ns + delta); + atomic_set_u64(&e->ns, e->ns + delta); if (acq) { - atomic_set__nocheck(&e->n_acqs, e->n_acqs + 1); + atomic_set_u64(&e->n_acqs, e->n_acqs + 1); } -#ifndef CONFIG_ATOMIC64 - seqlock_write_end(&e->sequence); -#endif } static inline void qsp_entry_record(QSPEntry *e, int64_t delta) @@ -550,7 +512,12 @@ static void qsp_aggregate(void *p, uint32_t h, void *up) hash = qsp_entry_no_thread_hash(e); agg = qsp_entry_find(ht, e, hash); - qsp_entry_aggregate(agg, e); + /* + * The entry is in the global hash table; read from it atomically (as in + * "read once"). + */ + agg->ns += atomic_read_u64(&e->ns); + agg->n_acqs += atomic_read_u64(&e->n_acqs); } static void qsp_iter_diff(void *p, uint32_t hash, void *htp) From 39fe576c82bc02749410ea16045109f7b7d4af62 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 10 Sep 2018 19:27:45 -0400 Subject: [PATCH 11/80] test-rcu-list: access n_reclaims and n_nodes_removed with atomic64 To avoid undefined behaviour. Note that these "atomics" are atomic in the "access once" sense. The variables are updated by a single thread at a time, so no "full" atomics are necessary. Signed-off-by: Emilio G. 
Cota Message-Id: <20180910232752.31565-6-cota@braap.org> Signed-off-by: Paolo Bonzini --- tests/test-rcu-list.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c index 192bfbf02e..2e6f70bd59 100644 --- a/tests/test-rcu-list.c +++ b/tests/test-rcu-list.c @@ -33,8 +33,8 @@ static QemuMutex counts_mutex; static long long n_reads = 0LL; static long long n_updates = 0LL; -static long long n_reclaims = 0LL; -static long long n_nodes_removed = 0LL; +static int64_t n_reclaims; +static int64_t n_nodes_removed; static long long n_nodes = 0LL; static int g_test_in_charge = 0; @@ -104,7 +104,7 @@ static void reclaim_list_el(struct rcu_head *prcu) struct list_element *el = container_of(prcu, struct list_element, rcu); g_free(el); /* Accessed only from call_rcu thread. */ - n_reclaims++; + atomic_set_i64(&n_reclaims, n_reclaims + 1); } #if TEST_LIST_TYPE == 1 @@ -232,7 +232,7 @@ static void *rcu_q_updater(void *arg) qemu_mutex_lock(&counts_mutex); n_nodes += n_nodes_local; n_updates += n_updates_local; - n_nodes_removed += n_removed_local; + atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local); qemu_mutex_unlock(&counts_mutex); return NULL; } @@ -286,19 +286,21 @@ static void rcu_qtest(const char *test, int duration, int nreaders) n_removed_local++; } qemu_mutex_lock(&counts_mutex); - n_nodes_removed += n_removed_local; + atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local); qemu_mutex_unlock(&counts_mutex); synchronize_rcu(); - while (n_nodes_removed > n_reclaims) { + while (atomic_read_i64(&n_nodes_removed) > atomic_read_i64(&n_reclaims)) { g_usleep(100); synchronize_rcu(); } if (g_test_in_charge) { - g_assert_cmpint(n_nodes_removed, ==, n_reclaims); + g_assert_cmpint(atomic_read_i64(&n_nodes_removed), ==, + atomic_read_i64(&n_reclaims)); } else { printf("%s: %d readers; 1 updater; nodes read: " \ - "%lld, nodes removed: %lld; nodes reclaimed: %lld\n", - test, nthreadsrunning - 1, n_reads, n_nodes_removed, n_reclaims); + "%lld, nodes removed: %"PRIi64"; nodes reclaimed: %"PRIi64"\n", + test, nthreadsrunning - 1, n_reads, + atomic_read_i64(&n_nodes_removed), atomic_read_i64(&n_reclaims)); exit(0); } } From 9b4e6f496601d3cd35fd8d09c9e2103999fd5c33 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 11 Sep 2018 13:15:32 +0200 Subject: [PATCH 12/80] cpus: take seqlock across qemu_icount updates Even though writes of qemu_icount can safely race with reads in qemu_icount_raw, qemu_icount is also read by icount_adjust, which runs in the I/O thread. Therefore, writes do need the protection of the vm_clock_lock; for simplicity the patch protects it with both seqlock+spinlock, which we already do for hosts that lack 64-bit atomics. The bug actually predated the introduction of vm_clock_lock; cpu_update_icount would have needed the BQL before the spinlock was introduced. Reported-by: Emilio G. Cota Signed-off-by: Paolo Bonzini --- cpus.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/cpus.c b/cpus.c index 4abc3b3dda..6e1a892f8c 100644 --- a/cpus.c +++ b/cpus.c @@ -245,21 +245,27 @@ static int64_t cpu_get_icount_executed(CPUState *cpu) * account executed instructions. This is done by the TCG vCPU * thread so the main-loop can see time has moved forward.
*/ -void cpu_update_icount(CPUState *cpu) +static void cpu_update_icount_locked(CPUState *cpu) { int64_t executed = cpu_get_icount_executed(cpu); cpu->icount_budget -= executed; -#ifndef CONFIG_ATOMIC64 - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); -#endif atomic_set__nocheck(&timers_state.qemu_icount, timers_state.qemu_icount + executed); -#ifndef CONFIG_ATOMIC64 +} + +/* + * Update the global shared timer_state.qemu_icount to take into + * account executed instructions. This is done by the TCG vCPU + * thread so the main-loop can see time has moved forward. + */ +void cpu_update_icount(CPUState *cpu) +{ + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + cpu_update_icount_locked(cpu); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); -#endif } static int64_t cpu_get_icount_raw_locked(void) @@ -272,7 +278,7 @@ static int64_t cpu_get_icount_raw_locked(void) exit(1); } /* Take into account what has run */ - cpu_update_icount(cpu); + cpu_update_icount_locked(cpu); } /* The read is protected by the seqlock, so __nocheck is okay. */ return atomic_read__nocheck(&timers_state.qemu_icount); From 38adcb6e4197b5277dbb5ad0036a804b30b25bc6 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 10 Sep 2018 19:27:49 -0400 Subject: [PATCH 13/80] cpus: access .qemu_icount with atomic64 Signed-off-by: Emilio G. Cota Message-Id: <20180910232752.31565-10-cota@braap.org> Signed-off-by: Paolo Bonzini --- cpus.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cpus.c b/cpus.c index 6e1a892f8c..fed8ec1dff 100644 --- a/cpus.c +++ b/cpus.c @@ -250,8 +250,8 @@ static void cpu_update_icount_locked(CPUState *cpu) int64_t executed = cpu_get_icount_executed(cpu); cpu->icount_budget -= executed; - atomic_set__nocheck(&timers_state.qemu_icount, - timers_state.qemu_icount + executed); + atomic_set_i64(&timers_state.qemu_icount, + timers_state.qemu_icount + executed); } /* @@ -280,8 +280,8 @@ static int64_t cpu_get_icount_raw_locked(void) /* Take into account what has run */ cpu_update_icount_locked(cpu); } - /* The read is protected by the seqlock, so __nocheck is okay. */ - return atomic_read__nocheck(&timers_state.qemu_icount); + /* The read is protected by the seqlock, but needs atomic64 to avoid UB */ + return atomic_read_i64(&timers_state.qemu_icount); } static int64_t cpu_get_icount_locked(void) From c97595d16654c3508643ae1881be1531de48ecc7 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 10 Sep 2018 19:27:50 -0400 Subject: [PATCH 14/80] cpus: access .qemu_icount_bias with atomic64 Signed-off-by: Emilio G. 
Cota Message-Id: <20180910232752.31565-11-cota@braap.org> Signed-off-by: Paolo Bonzini --- cpus.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cpus.c b/cpus.c index fed8ec1dff..d8b3b46cc8 100644 --- a/cpus.c +++ b/cpus.c @@ -287,7 +287,8 @@ static int64_t cpu_get_icount_raw_locked(void) static int64_t cpu_get_icount_locked(void) { int64_t icount = cpu_get_icount_raw_locked(); - return atomic_read__nocheck(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount); + return atomic_read_i64(&timers_state.qemu_icount_bias) + + cpu_icount_to_ns(icount); } int64_t cpu_get_icount_raw(void) @@ -460,9 +461,9 @@ static void icount_adjust(void) timers_state.icount_time_shift + 1); } last_delta = delta; - atomic_set__nocheck(&timers_state.qemu_icount_bias, - cur_icount - (timers_state.qemu_icount - << timers_state.icount_time_shift)); + atomic_set_i64(&timers_state.qemu_icount_bias, + cur_icount - (timers_state.qemu_icount + << timers_state.icount_time_shift)); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); } @@ -522,8 +523,8 @@ static void icount_warp_rt(void) int64_t delta = clock - cur_icount; warp_delta = MIN(warp_delta, delta); } - atomic_set__nocheck(&timers_state.qemu_icount_bias, - timers_state.qemu_icount_bias + warp_delta); + atomic_set_i64(&timers_state.qemu_icount_bias, + timers_state.qemu_icount_bias + warp_delta); } timers_state.vm_clock_warp_start = -1; seqlock_write_unlock(&timers_state.vm_clock_seqlock, @@ -554,8 +555,8 @@ void qtest_clock_warp(int64_t dest) seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); - atomic_set__nocheck(&timers_state.qemu_icount_bias, - timers_state.qemu_icount_bias + warp); + atomic_set_i64(&timers_state.qemu_icount_bias, + timers_state.qemu_icount_bias + warp); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); @@ -626,8 +627,8 @@ void qemu_start_warp_timer(void) */ seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); - atomic_set__nocheck(&timers_state.qemu_icount_bias, - timers_state.qemu_icount_bias + deadline); + atomic_set_i64(&timers_state.qemu_icount_bias, + timers_state.qemu_icount_bias + deadline); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); qemu_clock_notify(QEMU_CLOCK_VIRTUAL); From 7184de64a1cf23a72191484b9a6995c3bfc9fb0b Mon Sep 17 00:00:00 2001 From: Viktor Prutyanov Date: Wed, 29 Aug 2018 21:30:56 +0300 Subject: [PATCH 15/80] dump: fix Windows dump memory run mapping We should map and use a guest memory run in parts if it can't be mapped as a whole. After this patch, contiguous guest physical memory blocks which are not contiguous in host virtual address space will be processed correctly.
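The shape of the fix, reduced to a standalone sketch (map_region below is a hypothetical stand-in for cpu_physical_memory_map, which may hand back a shorter length than requested):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t ram[16384];

    /* Hypothetical mapper: maps at most 4 KiB per call, mimicking a
     * mapping primitive that may shorten the requested length. */
    static void *map_region(uint64_t addr, uint64_t *len)
    {
        if (addr >= sizeof(ram)) {
            return NULL;
        }
        if (*len > 4096) {
            *len = 4096;
        }
        return ram + addr;
    }

    /* Consume the run piece by piece instead of failing when the whole
     * range cannot be mapped in one go. */
    static uint64_t process_run(uint64_t addr, uint64_t size)
    {
        uint64_t total = 0;

        while (size) {
            uint64_t len = size;
            void *buf = map_region(addr, &len);

            if (!buf) {
                return 0;
            }
            /* a real implementation would write buf[0..len) out here */
            addr += len;
            size -= len;
            total += len;
        }
        return total;
    }

    int main(void)
    {
        printf("processed %llu bytes\n",
               (unsigned long long)process_run(0, sizeof(ram)));
        return 0;
    }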
Signed-off-by: Viktor Prutyanov Message-Id: <1535567456-6904-1-git-send-email-viktor.prutyanov@virtuozzo.com> Signed-off-by: Paolo Bonzini --- win_dump.c | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/win_dump.c b/win_dump.c index b15c191ad7..e10a7831c2 100644 --- a/win_dump.c +++ b/win_dump.c @@ -30,28 +30,32 @@ static size_t write_run(WinDumpPhyMemRun64 *run, int fd, Error **errp) void *buf; uint64_t addr = run->BasePage << TARGET_PAGE_BITS; uint64_t size = run->PageCount << TARGET_PAGE_BITS; - uint64_t len = size; + uint64_t len, l; + size_t total = 0; - buf = cpu_physical_memory_map(addr, &len, false); - if (!buf) { - error_setg(errp, "win-dump: failed to map run"); - return 0; - } - if (len != size) { - error_setg(errp, "win-dump: failed to map entire run"); - len = 0; - goto out_unmap; + while (size) { + len = size; + + buf = cpu_physical_memory_map(addr, &len, false); + if (!buf) { + error_setg(errp, "win-dump: failed to map physical range" " 0x%016" PRIx64 "-0x%016" PRIx64, addr, addr + size - 1); + return 0; + } + + l = qemu_write_full(fd, buf, len); + cpu_physical_memory_unmap(buf, addr, false, len); + if (l != len) { + error_setg(errp, QERR_IO_ERROR); + return 0; + } + + addr += l; + size -= l; + total += l; } - len = qemu_write_full(fd, buf, len); - if (len != size) { - error_setg(errp, QERR_IO_ERROR); - } - -out_unmap: - cpu_physical_memory_unmap(buf, addr, false, len); - - return len; + return total; } static void write_runs(DumpState *s, WinDumpHeader64 *h, Error **errp) From 3829640049cf516d229620e5919b0ab66fd6ac86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Thu, 6 Sep 2018 20:14:15 +0400 Subject: [PATCH 16/80] hostmem-memfd: add checks before adding hostmem-memfd & properties MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Run some memfd-related checks before registering hostmem-memfd & various properties. This will help libvirt to figure out what the host is supposed to be capable of. qemu_memfd_check() is changed to a less optimized version: since it is now called with various flags, it no longer caches the result.
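The probing idea in a minimal standalone form (Linux-only, and assumes a libc that exposes memfd_create(2), i.e. glibc 2.27 or newer): try to create a memfd with the flags of interest and report whether the kernel accepts them.

    #define _GNU_SOURCE
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static bool memfd_flags_supported(unsigned int flags)
    {
        int mfd = memfd_create("probe", flags);

        if (mfd >= 0) {
            close(mfd);
            return true;
        }
        return false;
    }

    int main(void)
    {
        printf("memfd: %d, sealing: %d\n",
               memfd_flags_supported(0),
               memfd_flags_supported(MFD_ALLOW_SEALING));
        return 0;
    }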
Signed-off-by: Marc-André Lureau Message-Id: <20180906161415.8543-1-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- backends/hostmem-memfd.c | 32 +++++++++++++++++++------------- include/qemu/memfd.h | 18 +++++++++++++++++- tests/vhost-user-test.c | 6 +++--- util/memfd.c | 37 +++++++------------------------------ 4 files changed, 46 insertions(+), 47 deletions(-) diff --git a/backends/hostmem-memfd.c b/backends/hostmem-memfd.c index 1e20fe0ba8..3800bd07b6 100644 --- a/backends/hostmem-memfd.c +++ b/backends/hostmem-memfd.c @@ -140,18 +140,22 @@ memfd_backend_class_init(ObjectClass *oc, void *data) bc->alloc = memfd_backend_memory_alloc; - object_class_property_add_bool(oc, "hugetlb", - memfd_backend_get_hugetlb, - memfd_backend_set_hugetlb, - &error_abort); - object_class_property_add(oc, "hugetlbsize", "int", - memfd_backend_get_hugetlbsize, - memfd_backend_set_hugetlbsize, - NULL, NULL, &error_abort); - object_class_property_add_bool(oc, "seal", - memfd_backend_get_seal, - memfd_backend_set_seal, - &error_abort); + if (qemu_memfd_check(MFD_HUGETLB)) { + object_class_property_add_bool(oc, "hugetlb", + memfd_backend_get_hugetlb, + memfd_backend_set_hugetlb, + &error_abort); + object_class_property_add(oc, "hugetlbsize", "int", + memfd_backend_get_hugetlbsize, + memfd_backend_set_hugetlbsize, + NULL, NULL, &error_abort); + } + if (qemu_memfd_check(MFD_ALLOW_SEALING)) { + object_class_property_add_bool(oc, "seal", + memfd_backend_get_seal, + memfd_backend_set_seal, + &error_abort); + } } static const TypeInfo memfd_backend_info = { @@ -164,7 +168,9 @@ static const TypeInfo memfd_backend_info = { static void register_types(void) { - type_register_static(&memfd_backend_info); + if (qemu_memfd_check(0)) { + type_register_static(&memfd_backend_info); + } } type_init(register_types); diff --git a/include/qemu/memfd.h b/include/qemu/memfd.h index 49e79634da..d551c28b68 100644 --- a/include/qemu/memfd.h +++ b/include/qemu/memfd.h @@ -16,12 +16,28 @@ #define F_SEAL_WRITE 0x0008 /* prevent writes */ #endif +#ifndef MFD_CLOEXEC +#define MFD_CLOEXEC 0x0001U +#endif + +#ifndef MFD_ALLOW_SEALING +#define MFD_ALLOW_SEALING 0x0002U +#endif + +#ifndef MFD_HUGETLB +#define MFD_HUGETLB 0x0004U +#endif + +#ifndef MFD_HUGE_SHIFT +#define MFD_HUGE_SHIFT 26 +#endif + int qemu_memfd_create(const char *name, size_t size, bool hugetlb, uint64_t hugetlbsize, unsigned int seals, Error **errp); bool qemu_memfd_alloc_check(void); void *qemu_memfd_alloc(const char *name, size_t size, unsigned int seals, int *fd, Error **errp); void qemu_memfd_free(void *ptr, size_t size, int fd); -bool qemu_memfd_check(void); +bool qemu_memfd_check(unsigned int flags); #endif /* QEMU_MEMFD_H */ diff --git a/tests/vhost-user-test.c b/tests/vhost-user-test.c index 716aff7153..45d58d8ea2 100644 --- a/tests/vhost-user-test.c +++ b/tests/vhost-user-test.c @@ -169,7 +169,7 @@ static char *get_qemu_cmd(TestServer *s, int mem, enum test_memfd memfd, const char *mem_path, const char *chr_opts, const char *extra) { - if (memfd == TEST_MEMFD_AUTO && qemu_memfd_check()) { + if (memfd == TEST_MEMFD_AUTO && qemu_memfd_check(0)) { memfd = TEST_MEMFD_YES; } @@ -903,7 +903,7 @@ static void test_multiqueue(void) s->queues = 2; test_server_listen(s); - if (qemu_memfd_check()) { + if (qemu_memfd_check(0)) { cmd = g_strdup_printf( QEMU_CMD_MEMFD QEMU_CMD_CHR QEMU_CMD_NETDEV ",queues=%d " "-device virtio-net-pci,netdev=net0,mq=on,vectors=%d", @@ -963,7 +963,7 @@ int main(int argc, char **argv) /* run the main loop thread so the chardev may 
operate */ thread = g_thread_new(NULL, thread_function, loop); - if (qemu_memfd_check()) { + if (qemu_memfd_check(0)) { qtest_add_data_func("/vhost-user/read-guest-mem/memfd", GINT_TO_POINTER(TEST_MEMFD_YES), test_read_guest_mem); diff --git a/util/memfd.c b/util/memfd.c index 6287946b61..8debd0d037 100644 --- a/util/memfd.c +++ b/util/memfd.c @@ -45,22 +45,6 @@ static int memfd_create(const char *name, unsigned int flags) } #endif -#ifndef MFD_CLOEXEC -#define MFD_CLOEXEC 0x0001U -#endif - -#ifndef MFD_ALLOW_SEALING -#define MFD_ALLOW_SEALING 0x0002U -#endif - -#ifndef MFD_HUGETLB -#define MFD_HUGETLB 0x0004U -#endif - -#ifndef MFD_HUGE_SHIFT -#define MFD_HUGE_SHIFT 26 -#endif - int qemu_memfd_create(const char *name, size_t size, bool hugetlb, uint64_t hugetlbsize, unsigned int seals, Error **errp) { @@ -201,23 +185,16 @@ bool qemu_memfd_alloc_check(void) * * Check if host supports memfd. */ -bool qemu_memfd_check(void) +bool qemu_memfd_check(unsigned int flags) { #ifdef CONFIG_LINUX - static int memfd_check = MEMFD_TODO; + int mfd = memfd_create("test", flags); - if (memfd_check == MEMFD_TODO) { - int mfd = memfd_create("test", 0); - if (mfd >= 0) { - memfd_check = MEMFD_OK; - close(mfd); - } else { - memfd_check = MEMFD_KO; - } + if (mfd >= 0) { + close(mfd); + return true; } - - return memfd_check == MEMFD_OK; -#else - return false; #endif + + return false; } From 0ea1472dc50a9488e87c4d19329c7d5c63eb7076 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 27 Aug 2018 10:47:51 +0200 Subject: [PATCH 17/80] kvm: x86: Fix kvm_arch_fixup_msi_route for remap-less case The AMD IOMMU does not (yet) support interrupt remapping. But kvm_arch_fixup_msi_route assumes that all implementations do and crashes when the AMD IOMMU is used in KVM mode. Fixes: 8b5ed7dffa1f ("intel_iommu: add support for split irqchip") Reported-by: Christopher Goldsworthy Signed-off-by: Jan Kiszka Message-Id: <48ae78d8-58ec-8813-8680-6f407ea46041@siemens.com> Reviewed-by: Peter Xu Reviewed-by: Michael S. Tsirkin Signed-off-by: Paolo Bonzini --- target/i386/kvm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/target/i386/kvm.c b/target/i386/kvm.c index 0b2a07d3a4..de892db671 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -3669,6 +3669,10 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, MSIMessage src, dst; X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu); + if (!class->int_remap) { + return 0; + } + src.address = route->u.msi.address_hi; src.address <<= VTD_MSI_ADDR_HI_SHIFT; src.address |= route->u.msi.address_lo; From e811da7fe229cc17d98b230bdfeaf6d0631ea987 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20P=2E=20Berrang=C3=A9?= Date: Tue, 4 Sep 2018 13:36:03 +0100 Subject: [PATCH 18/80] configure: preserve various environment variables in config.status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The config.status script is auto-generated by configure upon completion. The intention is that config.status can be later invoked by the developer directly, or by make indirectly, to re-detect the same environment that configure originally used. The current config.status script, however, only contains a record of the command line arguments to configure. Various environment variables have an effect on what configure will find. In particular PKG_CONFIG_LIBDIR & PKG_CONFIG_PATH vars will affect what libraries pkg-config finds. The PATH var will affect what toolchain binaries and XXXX-config scripts are found. 
The LD_LIBRARY_PATH var will affect what libraries are found. Most commands have env variables that will override the name/path of the default version configure finds. All these key env variables should be recorded in the config.status script. Autoconf would also preserve CFLAGS, LDFLAGS, LIBS, CPPFLAGS, but QEMU deals with those differently, expecting extra flags to be set using configure args, rather than env variables. At the end of the script we also don't have the original values of those env vars, as we modify them during configure. Reviewed-by: Eric Blake Reviewed-by: Stefan Weil Signed-off-by: Daniel P. Berrange Message-Id: <20180904123603.10016-1-berrange@redhat.com> Reviewed-by: Thomas Huth Signed-off-by: Paolo Bonzini Signed-off-by: Daniel P. Berrangé --- configure | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/configure b/configure index 18006f0865..59d1ade2d3 100755 --- a/configure +++ b/configure @@ -7527,6 +7527,46 @@ cat <config.status # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. EOD + +preserve_env() { + envname=$1 + + eval envval=\$$envname + + if test -n "$envval" + then + echo "$envname='$envval'" >> config.status + echo "export $envname" >> config.status + else + echo "unset $envname" >> config.status + fi +} + +# Preserve various env variables that influence what +# features/build target configure will detect +preserve_env AR +preserve_env AS +preserve_env CC +preserve_env CPP +preserve_env CXX +preserve_env INSTALL +preserve_env LD +preserve_env LD_LIBRARY_PATH +preserve_env LIBTOOL +preserve_env MAKE +preserve_env NM +preserve_env OBJCOPY +preserve_env PATH +preserve_env PKG_CONFIG +preserve_env PKG_CONFIG_LIBDIR +preserve_env PKG_CONFIG_PATH +preserve_env PYTHON +preserve_env SDL_CONFIG +preserve_env SDL2_CONFIG +preserve_env SMBD +preserve_env STRIP +preserve_env WINDRES + printf "exec" >>config.status printf " '%s'" "$0" "$@" >>config.status echo ' "$@"' >>config.status From 71bb4ce1b5592cdc03abc48cdf4ecb15b2db81a0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 5 Sep 2018 15:11:25 +0200 Subject: [PATCH 19/80] hw/char/sh_serial: Add timeout handling to unbreak serial input As of commit 18e8cf159177100e ("serial: sh-sci: increase RX FIFO trigger defaults for (H)SCIF") in Linux v4.11-rc1, the serial console on the QEMU SH4 target is broken: it delays serial input until enough data has been received. Since aforementioned commit, the Linux SCIF driver programs the Receive FIFO Data Count Trigger bits in the FIFO Control Register, to postpone generating a receive interrupt until: 1. At least the receive trigger count of bytes of data are available in the receive FIFO, OR 2. No further data has been received for at least 15 etu after the last received data. While QEMU implements the former, it does not implement the latter. Hence the receive interrupt is not generated until the former condition is met. Fix this by adding basic timeout handling. As the QEMU SCIF emulation ignores any serial speed programming, the timeout value used conforms to a default speed of 9600 bps, which is fine for any interactive console. 
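For reference, the arithmetic behind that timeout: at the assumed 9600 bps, one etu (elementary time unit, i.e. one bit time) is 1/9600 s, about 104.2 µs, so the 15-etu timeout used below fires roughly 1.56 ms after the last received byte. That is short enough to feel instantaneous on an interactive console, yet long enough to span the gap between consecutive characters.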
Reported-by: Rob Landley Signed-off-by: Geert Uytterhoeven Tested-by: Ulrich Hecht Tested-by: Rob Landley Tested-by: Rich Felker Message-Id: <20180905131125.12635-1-geert+renesas@glider.be> Signed-off-by: Paolo Bonzini --- hw/char/sh_serial.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/hw/char/sh_serial.c b/hw/char/sh_serial.c index 373a40595f..12831561a6 100644 --- a/hw/char/sh_serial.c +++ b/hw/char/sh_serial.c @@ -29,6 +29,7 @@ #include "hw/sh4/sh.h" #include "chardev/char-fe.h" #include "qapi/error.h" +#include "qemu/timer.h" //#define DEBUG_SERIAL @@ -63,6 +64,8 @@ typedef struct { int rtrg; CharBackend chr; + QEMUTimer *fifo_timeout_timer; + uint64_t etu; /* Elementary Time Unit (ns) */ qemu_irq eri; qemu_irq rxi; @@ -314,6 +317,16 @@ static int sh_serial_can_receive1(void *opaque) return sh_serial_can_receive(s); } +static void sh_serial_timeout_int(void *opaque) +{ + sh_serial_state *s = opaque; + + s->flags |= SH_SERIAL_FLAG_RDF; + if (s->scr & (1 << 6) && s->rxi) { + qemu_set_irq(s->rxi, 1); + } +} + static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) { sh_serial_state *s = opaque; @@ -330,8 +343,12 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) if (s->rx_cnt >= s->rtrg) { s->flags |= SH_SERIAL_FLAG_RDF; if (s->scr & (1 << 6) && s->rxi) { + timer_del(s->fifo_timeout_timer); qemu_set_irq(s->rxi, 1); } + } else { + timer_mod(s->fifo_timeout_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu); } } } @@ -402,6 +419,9 @@ void sh_serial_init(MemoryRegion *sysmem, sh_serial_event, NULL, s, NULL, true); } + s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + sh_serial_timeout_int, s); + s->etu = NANOSECONDS_PER_SECOND / 9600; s->eri = eri_source; s->rxi = rxi_source; s->txi = txi_source; From 9e6bdef224f700c057462a7d5e9b4a2770e04569 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 31 Aug 2018 16:53:12 +0200 Subject: [PATCH 20/80] util: add qemu_write_pidfile() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are variants of qemu_create_pidfile() in qemu-pr-helper and qemu-ga. Let's have a common implementation in libqemuutil. The code is initially based on pr-helper's write_pidfile(), with various improvements and suggestions from Daniel Berrangé: QEMU will leave the pidfile existing on disk when it exits which initially made me think it avoids the deletion race. The app managing QEMU, however, may well delete the pidfile after it has seen QEMU exit, and even if the app locks the pidfile before deleting it, there is still a race. E.g. consider the following sequence:

         QEMU 1           libvirtd         QEMU 2
      1. lock(pidfile)
      2. exit()
      3.                  open(pidfile)
      4.                  lock(pidfile)
      5.                                   open(pidfile)
      6.                  unlink(pidfile)
      7.                  close(pidfile)
      8.                                   lock(pidfile)

IOW, at step 8 the new QEMU has successfully acquired the lock, but the pidfile no longer exists on disk because it was deleted after the original QEMU exited. While we could just say no external app should ever delete the pidfile, I don't think that is satisfactory as people don't read docs, and admins don't like stale pidfiles being left around on disk. To make this robust, I think we might want to copy libvirt's approach to pidfile acquisition which runs in a loop and checks that the file on disk /after/ acquiring the lock matches the file that was locked. Then we could in fact safely let QEMU delete its own pidfiles on clean exit.
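Distilled to a standalone sketch, the acquire-and-verify loop described above looks roughly like this (using lockf() here; a follow-up patch in this series switches QEMU to fcntl() locking). The full version, with error reporting, is in the diff below:

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Lock the pidfile, then check that the locked fd still matches the
     * path on disk; if another process unlinked or recreated the file in
     * the meantime, retry. */
    static int open_pidfile_locked(const char *path)
    {
        for (;;) {
            struct stat a, b;
            int fd = open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);

            if (fd < 0 || fstat(fd, &b) < 0 || lockf(fd, F_TLOCK, 0) < 0) {
                if (fd >= 0) {
                    close(fd);
                }
                return -1;   /* error, or someone else holds the lock */
            }
            if (stat(path, &a) == 0 && a.st_ino == b.st_ino) {
                return fd;   /* we locked the file that is really on disk */
            }
            close(fd);       /* lost a race: file was recreated, try again */
        }
    }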
Signed-off-by: Marc-André Lureau Message-Id: <20180831145314.14736-2-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- include/qemu/osdep.h | 3 +- os-posix.c | 24 --------------- os-win32.c | 25 ---------------- qga/main.c | 54 +++++++--------------------------- scsi/qemu-pr-helper.c | 40 ++++--------------------- util/oslib-posix.c | 68 +++++++++++++++++++++++++++++++++++++++++++ util/oslib-win32.c | 27 +++++++++++++++++ vl.c | 4 +-- 8 files changed, 114 insertions(+), 131 deletions(-) diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index a746a5e531..4f8559e550 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -448,7 +448,8 @@ bool qemu_has_ofd_lock(void); #define FMT_pid "%d" #endif -int qemu_create_pidfile(const char *filename); +bool qemu_write_pidfile(const char *pidfile, Error **errp); + int qemu_get_thread_id(void); #ifndef CONFIG_IOVEC diff --git a/os-posix.c b/os-posix.c index 8f39447dd4..4bd80e44e6 100644 --- a/os-posix.c +++ b/os-posix.c @@ -344,30 +344,6 @@ void os_set_line_buffering(void) setvbuf(stdout, NULL, _IOLBF, 0); } -int qemu_create_pidfile(const char *filename) -{ - char buffer[128]; - int len; - int fd; - - fd = qemu_open(filename, O_RDWR | O_CREAT, 0600); - if (fd == -1) { - return -1; - } - if (lockf(fd, F_TLOCK, 0) == -1) { - close(fd); - return -1; - } - len = snprintf(buffer, sizeof(buffer), FMT_pid "\n", getpid()); - if (write(fd, buffer, len) != len) { - close(fd); - return -1; - } - - /* keep pidfile open & locked forever */ - return 0; -} - bool is_daemonized(void) { return daemonize; diff --git a/os-win32.c b/os-win32.c index 0674f94b57..0e0d7f50f3 100644 --- a/os-win32.c +++ b/os-win32.c @@ -97,28 +97,3 @@ int os_parse_cmd_args(int index, const char *optarg) { return -1; } - -int qemu_create_pidfile(const char *filename) -{ - char buffer[128]; - int len; - HANDLE file; - OVERLAPPED overlap; - BOOL ret; - memset(&overlap, 0, sizeof(overlap)); - - file = CreateFile(filename, GENERIC_WRITE, FILE_SHARE_READ, NULL, - OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); - - if (file == INVALID_HANDLE_VALUE) { - return -1; - } - len = snprintf(buffer, sizeof(buffer), "%d\n", getpid()); - ret = WriteFile(file, (LPCVOID)buffer, (DWORD)len, - NULL, &overlap); - CloseHandle(file); - if (ret == 0) { - return -1; - } - return 0; -} diff --git a/qga/main.c b/qga/main.c index 6d70242d05..c399320d3c 100644 --- a/qga/main.c +++ b/qga/main.c @@ -340,46 +340,6 @@ static FILE *ga_open_logfile(const char *logfile) return f; } -#ifndef _WIN32 -static bool ga_open_pidfile(const char *pidfile) -{ - int pidfd; - char pidstr[32]; - - pidfd = qemu_open(pidfile, O_CREAT|O_WRONLY, S_IRUSR|S_IWUSR); - if (pidfd == -1 || lockf(pidfd, F_TLOCK, 0)) { - g_critical("Cannot lock pid file, %s", strerror(errno)); - if (pidfd != -1) { - close(pidfd); - } - return false; - } - - if (ftruncate(pidfd, 0)) { - g_critical("Failed to truncate pid file"); - goto fail; - } - snprintf(pidstr, sizeof(pidstr), "%d\n", getpid()); - if (write(pidfd, pidstr, strlen(pidstr)) != strlen(pidstr)) { - g_critical("Failed to write pid file"); - goto fail; - } - - /* keep pidfile open & locked forever */ - return true; - -fail: - unlink(pidfile); - close(pidfd); - return false; -} -#else /* _WIN32 */ -static bool ga_open_pidfile(const char *pidfile) -{ - return true; -} -#endif - static gint ga_strcmp(gconstpointer str1, gconstpointer str2) { return strcmp(str1, str2); @@ -479,8 +439,11 @@ void ga_unset_frozen(GAState *s) ga_enable_logging(s); g_warning("logging re-enabled due to filesystem 
unfreeze"); if (s->deferred_options.pid_filepath) { - if (!ga_open_pidfile(s->deferred_options.pid_filepath)) { - g_warning("failed to create/open pid file"); + Error *err = NULL; + + if (!qemu_write_pidfile(s->deferred_options.pid_filepath, &err)) { + g_warning("%s", error_get_pretty(err)); + error_free(err); } s->deferred_options.pid_filepath = NULL; } @@ -515,8 +478,11 @@ static void become_daemon(const char *pidfile) } if (pidfile) { - if (!ga_open_pidfile(pidfile)) { - g_critical("failed to create pidfile"); + Error *err = NULL; + + if (!qemu_write_pidfile(pidfile, &err)) { + g_critical("%s", error_get_pretty(err)); + error_free(err); exit(EXIT_FAILURE); } } diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index ed037aabee..ce40008bfc 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -117,39 +117,6 @@ QEMU_COPYRIGHT "\n" , name); } -static void write_pidfile(void) -{ - int pidfd; - char pidstr[32]; - - pidfd = qemu_open(pidfile, O_CREAT|O_WRONLY, S_IRUSR|S_IWUSR); - if (pidfd == -1) { - error_report("Cannot open pid file, %s", strerror(errno)); - exit(EXIT_FAILURE); - } - - if (lockf(pidfd, F_TLOCK, 0)) { - error_report("Cannot lock pid file, %s", strerror(errno)); - goto fail; - } - if (ftruncate(pidfd, 0)) { - error_report("Failed to truncate pid file"); - goto fail; - } - - snprintf(pidstr, sizeof(pidstr), "%d\n", getpid()); - if (write(pidfd, pidstr, strlen(pidstr)) != strlen(pidstr)) { - error_report("Failed to write pid file"); - goto fail; - } - return; - -fail: - unlink(pidfile); - close(pidfd); - exit(EXIT_FAILURE); -} - /* SG_IO support */ typedef struct PRHelperSGIOData { @@ -1080,8 +1047,11 @@ int main(int argc, char **argv) } } - if (daemonize || pidfile_specified) - write_pidfile(); + if ((daemonize || pidfile_specified) && + !qemu_write_pidfile(pidfile, &local_err)) { + error_report_err(local_err); + exit(EXIT_FAILURE); + } #ifdef CONFIG_LIBCAP if (drop_privileges() < 0) { diff --git a/util/oslib-posix.c b/util/oslib-posix.c index 13b6f8d776..0e3ab9d959 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -88,6 +88,74 @@ int qemu_daemon(int nochdir, int noclose) return daemon(nochdir, noclose); } +bool qemu_write_pidfile(const char *path, Error **errp) +{ + int fd; + char pidstr[32]; + + while (1) { + struct stat a, b; + + fd = qemu_open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR); + if (fd == -1) { + error_setg_errno(errp, errno, "Cannot open pid file"); + return false; + } + + if (fstat(fd, &b) < 0) { + error_setg_errno(errp, errno, "Cannot stat file"); + goto fail_close; + } + + if (lockf(fd, F_TLOCK, 0) < 0) { + error_setg_errno(errp, errno, "Cannot lock pid file"); + goto fail_close; + } + + /* + * Now make sure the path we locked is the same one that now + * exists on the filesystem. + */ + if (stat(path, &a) < 0) { + /* + * PID file disappeared, someone else must be racing with + * us, so try again. + */ + close(fd); + continue; + } + + if (a.st_ino == b.st_ino) { + break; + } + + /* + * PID file was recreated, someone else must be racing with + * us, so try again. 
+ */ + close(fd); + } + + if (ftruncate(fd, 0) < 0) { + error_setg_errno(errp, errno, "Failed to truncate pid file"); + goto fail_unlink; + } + + snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid()); + if (write(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) { + error_setg(errp, "Failed to write pid file"); + goto fail_unlink; + } + + return true; + +fail_unlink: + unlink(path); +fail_close: + close(fd); + return false; +} + void *qemu_oom_check(void *ptr) { if (ptr == NULL) { diff --git a/util/oslib-win32.c b/util/oslib-win32.c index 25dd1595ad..b4c17f5dfa 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -776,3 +776,30 @@ ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags, } return ret; } + +bool qemu_write_pidfile(const char *filename, Error **errp) +{ + char buffer[128]; + int len; + HANDLE file; + OVERLAPPED overlap; + BOOL ret; + memset(&overlap, 0, sizeof(overlap)); + + file = CreateFile(filename, GENERIC_WRITE, FILE_SHARE_READ, NULL, + OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + + if (file == INVALID_HANDLE_VALUE) { + error_setg(errp, "Failed to create PID file"); + return false; + } + len = snprintf(buffer, sizeof(buffer), FMT_pid "\n", (pid_t)getpid()); + ret = WriteFile(file, (LPCVOID)buffer, (DWORD)len, + NULL, &overlap); + CloseHandle(file); + if (ret == 0) { + error_setg(errp, "Failed to write PID file"); + return false; + } + return true; +} diff --git a/vl.c b/vl.c index cc55fe04a2..fa6db178a6 100644 --- a/vl.c +++ b/vl.c @@ -3906,8 +3906,8 @@ int main(int argc, char **argv, char **envp) os_daemonize(); rcu_disable_atfork(); - if (pid_file && qemu_create_pidfile(pid_file) != 0) { - error_report("could not acquire pid file: %s", strerror(errno)); + if (pid_file && !qemu_write_pidfile(pid_file, &err)) { + error_reportf_err(err, "cannot create PID file: "); exit(1); } From 35f7f3fb5c65dcdf8315bbfd40a3c1d015663d77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 31 Aug 2018 16:53:13 +0200 Subject: [PATCH 21/80] util: use fcntl() for qemu_write_pidfile() locking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Daniel Berrangé suggested to use fcntl() locks rather than lockf(). 'man lockf': On Linux, lockf() is just an interface on top of fcntl(2) locking. Many other systems implement lockf() in this way, but note that POSIX.1 leaves the relationship between lockf() and fcntl(2) locks unspecified. A portable application should probably avoid mixing calls to these interfaces. IOW, if its just a shim around fcntl() on many systems, it is clearer if we just use fcntl() directly, as we then know how fcntl() locks will behave if they're on a network filesystem like NFS. Suggested-by: Daniel P. 
Suggested-by: Daniel P. Berrangé Signed-off-by: Marc-André Lureau Message-Id: <20180831145314.14736-3-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- util/oslib-posix.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/util/oslib-posix.c b/util/oslib-posix.c index 0e3ab9d959..fbd0dc8c57 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -95,6 +95,11 @@ bool qemu_write_pidfile(const char *path, Error **errp) while (1) { struct stat a, b; + struct flock lock = { + .l_type = F_WRLCK, + .l_whence = SEEK_SET, + .l_len = 0, + }; fd = qemu_open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR); if (fd == -1) { @@ -107,7 +112,7 @@ bool qemu_write_pidfile(const char *path, Error **errp) goto fail_close; } - if (lockf(fd, F_TLOCK, 0) < 0) { + if (fcntl(fd, F_SETLK, &lock)) { error_setg_errno(errp, errno, "Cannot lock pid file"); goto fail_close; } From 61a9346f60fe32143c007bb27bac6e1d83e4aee8 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 11 Sep 2018 15:16:58 +0200 Subject: [PATCH 22/80] serial: fix DLL writes Commit 0147883450fe84bb8de2d4a58381881f4262ce9b tries to handle word-sized writes to DLL/DLH, but due to a typo it causes tracebacks in all Linux kernels running the PXA serial driver, because of an unexpected DLL register value. Here is the surrounding code from drivers/tty/serial/pxa.c: serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ /* * work around Errata #75 according to Intel(R) PXA27x * Processor Family Specification Update (Nov 2005) */ dll = serial_in(up, UART_DLL); WARN_ON(dll != (quot & 0xff)); // <-- warning Reported-by: Guenter Roeck Tested-by: Guenter Roeck Fixes: 0147883450fe84bb8de2d4a58381881f4262ce9b Signed-off-by: Paolo Bonzini --- hw/char/serial.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw/char/serial.c b/hw/char/serial.c index 251f40fdac..02463e3388 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -345,9 +345,9 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val, default: case 0: if (s->lcr & UART_LCR_DLAB) { - if (size == 2) { + if (size == 1) { s->divider = (s->divider & 0xff00) | val; - } else if (size == 4) { + } else { s->divider = val; } serial_update_parameters(s); From 90a84d131c09096bdc424027526b575fe6a8a8d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 7 Sep 2018 16:13:19 +0400 Subject: [PATCH 23/80] Delete PID file on exit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Register an exit notifier to remove the PID file. By the time atexit() is called, qemu_write_pidfile() has guaranteed that QEMU owns the PID file, so we can safely remove it on exit.
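As an illustration of the pattern (a minimal standalone sketch using plain atexit() rather than QEMU's Notifier/qemu_add_exit_notifier() API; the path is made up):

#include <stdlib.h>
#include <unistd.h>

static const char *pidfile_path; /* set only once we own the file */

static void unlink_pidfile(void)
{
    if (pidfile_path) {
        unlink(pidfile_path);
    }
}

int main(void)
{
    /* assume the write+lock step succeeded, so the file is ours */
    pidfile_path = "/tmp/example.pid";
    atexit(unlink_pidfile);
    /* ... normal operation ... */
    return 0; /* exit() runs the handler and removes the file */
}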
Signed-off-by: Marc-André Lureau Message-Id: <20180907121319.8607-4-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- vl.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/vl.c b/vl.c index fa6db178a6..f2964d9e43 100644 --- a/vl.c +++ b/vl.c @@ -2560,6 +2560,16 @@ static void qemu_run_exit_notifiers(void) notifier_list_notify(&exit_notifiers, NULL); } +static const char *pid_file; +static Notifier qemu_unlink_pidfile_notifier; + +static void qemu_unlink_pidfile(Notifier *n, void *data) +{ + if (pid_file) { + unlink(pid_file); + } +} + bool machine_init_done; void qemu_add_machine_init_done_notifier(Notifier *notify) @@ -2884,7 +2894,6 @@ int main(int argc, char **argv, char **envp) const char *vga_model = NULL; const char *qtest_chrdev = NULL; const char *qtest_log = NULL; - const char *pid_file = NULL; const char *incoming = NULL; bool userconfig = true; bool nographic = false; @@ -3911,6 +3920,9 @@ int main(int argc, char **argv, char **envp) exit(1); } + qemu_unlink_pidfile_notifier.notify = qemu_unlink_pidfile; + qemu_add_exit_notifier(&qemu_unlink_pidfile_notifier); + if (qemu_init_main_loop(&main_loop_err)) { error_report_err(main_loop_err); exit(1); From f3839fda5771596152b75dd1e1a6d050e6e6e380 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Thu, 13 Sep 2018 18:07:13 +0800 Subject: [PATCH 24/80] change get_image_size return type to int64_t Previously, if the initrd was 2G or larger, qemu exited with an error: root@haswell-OptiPlex-9020:/home/lizj# /home/lizhijian/lkp/qemu-colo/x86_64-softmmu/qemu-system-x86_64 -kernel ./vmlinuz-4.16.0-rc4 -initrd large.cgz -nographic qemu: error reading initrd large.cgz: No such file or directory root@haswell-OptiPlex-9020:/home/lizj# du -sh large.cgz 2.5G large.cgz This patch also updates the callers that use this function to calculate the size of the initrd file.
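To illustrate the underlying problem (a sketch, not part of the patch): with a 32-bit int return type, any size of 2G or more wraps to a negative value on common platforms, so callers take the same failure path as a read error. The 2684354560-byte figure matches the 2.5G initrd from the report:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int64_t size = 2684354560LL; /* 2.5G, as in the report above */
    int truncated = (int)size;   /* the old get_image_size() return type */

    /* on typical two's-complement platforms prints
     * "2684354560 truncates to -1610612736" */
    printf("%" PRId64 " truncates to %d\n", size, truncated);
    return 0;
}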
v2: update error message and int64_t printing format Signed-off-by: Li Zhijian Message-Id: <1536833233-14121-1-git-send-email-lizhijian@cn.fujitsu.com> Signed-off-by: Paolo Bonzini --- hw/alpha/dp264.c | 3 ++- hw/core/loader.c | 5 +++-- hw/hppa/machine.c | 2 +- hw/i386/pc.c | 7 ++++++- hw/mips/mips_fulong2e.c | 6 +++--- hw/mips/mips_malta.c | 6 +++--- hw/mips/mips_mipssim.c | 3 +-- hw/mips/mips_r4k.c | 6 +++--- hw/moxie/moxiesim.c | 2 +- include/hw/loader.h | 2 +- 10 files changed, 24 insertions(+), 18 deletions(-) diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index 80b987f7fb..dd62f2a405 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -150,7 +150,8 @@ static void clipper_init(MachineState *machine) } if (initrd_filename) { - long initrd_base, initrd_size; + long initrd_base; + int64_t initrd_size; initrd_size = get_image_size(initrd_filename); if (initrd_size < 0) { diff --git a/hw/core/loader.c b/hw/core/loader.c index 390987a05c..aa0b3fc867 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -61,9 +61,10 @@ static int roms_loaded; /* return the size or -1 if error */ -int get_image_size(const char *filename) +int64_t get_image_size(const char *filename) { - int fd, size; + int fd; + int64_t size; fd = open(filename, O_RDONLY | O_BINARY); if (fd < 0) return -1; diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 0fb8fb877e..ac6dd7f6ab 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -191,7 +191,7 @@ static void machine_hppa_init(MachineState *machine) if (initrd_filename) { ram_addr_t initrd_base; - long initrd_size; + int64_t initrd_size; initrd_size = get_image_size(initrd_filename); if (initrd_size < 0) { diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 03148450c8..cd5029c149 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -838,7 +838,8 @@ static void load_linux(PCMachineState *pcms, FWCfgState *fw_cfg) { uint16_t protocol; - int setup_size, kernel_size, initrd_size = 0, cmdline_size; + int setup_size, kernel_size, cmdline_size; + int64_t initrd_size = 0; int dtb_size, setup_data_offset; uint32_t initrd_max; uint8_t header[8192], *setup, *kernel, *initrd_data; @@ -974,6 +975,10 @@ static void load_linux(PCMachineState *pcms, fprintf(stderr, "qemu: error reading initrd %s: %s\n", initrd_filename, strerror(errno)); exit(1); + } else if (initrd_size >= initrd_max) { + fprintf(stderr, "qemu: initrd is too large, cannot support." 
+ "(max: %"PRIu32", need %"PRId64")\n", initrd_max, initrd_size); + exit(1); } initrd_addr = (initrd_max-initrd_size) & ~4095; diff --git a/hw/mips/mips_fulong2e.c b/hw/mips/mips_fulong2e.c index c1694c8254..2fbba32c48 100644 --- a/hw/mips/mips_fulong2e.c +++ b/hw/mips/mips_fulong2e.c @@ -104,9 +104,9 @@ static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t* prom_buf, int index, static int64_t load_kernel (CPUMIPSState *env) { - int64_t kernel_entry, kernel_low, kernel_high; + int64_t kernel_entry, kernel_low, kernel_high, initrd_size; int index = 0; - long kernel_size, initrd_size; + long kernel_size; ram_addr_t initrd_offset; uint32_t *prom_buf; long prom_size; @@ -150,7 +150,7 @@ static int64_t load_kernel (CPUMIPSState *env) prom_set(prom_buf, index++, "%s", loaderparams.kernel_filename); if (initrd_size > 0) { - prom_set(prom_buf, index++, "rd_start=0x%" PRIx64 " rd_size=%li %s", + prom_set(prom_buf, index++, "rd_start=0x%" PRIx64 " rd_size=%" PRId64 " %s", cpu_mips_phys_to_kseg0(NULL, initrd_offset), initrd_size, loaderparams.kernel_cmdline); } else { diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c index 40041d5ec0..29b90bacf3 100644 --- a/hw/mips/mips_malta.c +++ b/hw/mips/mips_malta.c @@ -995,8 +995,8 @@ static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t* prom_buf, int index, /* Kernel */ static int64_t load_kernel (void) { - int64_t kernel_entry, kernel_high; - long kernel_size, initrd_size; + int64_t kernel_entry, kernel_high, initrd_size; + long kernel_size; ram_addr_t initrd_offset; int big_endian; uint32_t *prom_buf; @@ -1070,7 +1070,7 @@ static int64_t load_kernel (void) prom_set(prom_buf, prom_index++, "%s", loaderparams.kernel_filename); if (initrd_size > 0) { - prom_set(prom_buf, prom_index++, "rd_start=0x%" PRIx64 " rd_size=%li %s", + prom_set(prom_buf, prom_index++, "rd_start=0x%" PRIx64 " rd_size=%" PRId64 " %s", xlate_to_kseg0(NULL, initrd_offset), initrd_size, loaderparams.kernel_cmdline); } else { diff --git a/hw/mips/mips_mipssim.c b/hw/mips/mips_mipssim.c index 241faa1d0f..f665752a2f 100644 --- a/hw/mips/mips_mipssim.c +++ b/hw/mips/mips_mipssim.c @@ -58,9 +58,8 @@ typedef struct ResetData { static int64_t load_kernel(void) { - int64_t entry, kernel_high; + int64_t entry, kernel_high, initrd_size; long kernel_size; - long initrd_size; ram_addr_t initrd_offset; int big_endian; diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c index d5725d0555..3e852e98cf 100644 --- a/hw/mips/mips_r4k.c +++ b/hw/mips/mips_r4k.c @@ -81,8 +81,8 @@ typedef struct ResetData { static int64_t load_kernel(void) { const size_t params_size = 264; - int64_t entry, kernel_high; - long kernel_size, initrd_size; + int64_t entry, kernel_high, initrd_size; + long kernel_size; ram_addr_t initrd_offset; uint32_t *params_buf; int big_endian; @@ -136,7 +136,7 @@ static int64_t load_kernel(void) params_buf[1] = tswap32(0x12345678); if (initrd_size > 0) { - snprintf((char *)params_buf + 8, 256, "rd_start=0x%" PRIx64 " rd_size=%li %s", + snprintf((char *)params_buf + 8, 256, "rd_start=0x%" PRIx64 " rd_size=%" PRId64 " %s", cpu_mips_phys_to_kseg0(NULL, initrd_offset), initrd_size, loaderparams.kernel_cmdline); } else { diff --git a/hw/moxie/moxiesim.c b/hw/moxie/moxiesim.c index d41247dbdc..4b0ce09c5e 100644 --- a/hw/moxie/moxiesim.c +++ b/hw/moxie/moxiesim.c @@ -54,8 +54,8 @@ typedef struct { static void load_kernel(MoxieCPU *cpu, LoaderParams *loader_params) { uint64_t entry, kernel_low, kernel_high; + int64_t initrd_size; long kernel_size; - long initrd_size; ram_addr_t initrd_offset; kernel_size = 
load_elf(loader_params->kernel_filename, NULL, NULL, diff --git a/include/hw/loader.h b/include/hw/loader.h index 3c112975f4..67a0af84ac 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -10,7 +10,7 @@ * Returns the size of the image file on success, -1 otherwise. * On error, errno is also set as appropriate. */ -int get_image_size(const char *filename); +int64_t get_image_size(const char *filename); int load_image(const char *filename, uint8_t *addr); /* deprecated */ ssize_t load_image_size(const char *filename, void *addr, size_t size); From 93a3e108eb6a9bb781ab7db6e92d91528e482030 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:38:47 -0400 Subject: [PATCH 25/80] target/i386: move cpu_cc_srcT to DisasContext Signed-off-by: Emilio G. Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index 1f9d1d9b24..e9f512472e 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -73,7 +73,7 @@ /* global register indexes */ static TCGv cpu_A0; -static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT; +static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2; static TCGv_i32 cpu_cc_op; static TCGv cpu_regs[CPU_NB_REGS]; static TCGv cpu_seg_base[6]; @@ -135,6 +135,10 @@ typedef struct DisasContext { int cpuid_ext3_features; int cpuid_7_0_ebx_features; int cpuid_xsave_features; + + /* TCG local temps */ + TCGv cc_srcT; + sigjmp_buf jmpbuf; } DisasContext; @@ -244,7 +248,7 @@ static void set_cc_op(DisasContext *s, CCOp op) tcg_gen_discard_tl(cpu_cc_src2); } if (dead & USES_CC_SRCT) { - tcg_gen_discard_tl(cpu_cc_srcT); + tcg_gen_discard_tl(s->cc_srcT); } if (op == CC_OP_DYNAMIC) { @@ -667,11 +671,11 @@ static inline void gen_op_testl_T0_T1_cc(void) tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1); } -static void gen_op_update_neg_cc(void) +static void gen_op_update_neg_cc(DisasContext *s) { tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_neg_tl(cpu_cc_src, cpu_T0); - tcg_gen_movi_tl(cpu_cc_srcT, 0); + tcg_gen_movi_tl(s->cc_srcT, 0); } /* compute all eflags to cc_src */ @@ -742,7 +746,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); /* If no temporary was used, be careful not to alias t1 and t0. */ t0 = t1 == cpu_cc_src ? 
cpu_tmp0 : reg; - tcg_gen_mov_tl(t0, cpu_cc_srcT); + tcg_gen_mov_tl(t0, s->cc_srcT); gen_extu(size, t0); goto add_sub; @@ -899,7 +903,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) size = s->cc_op - CC_OP_SUBB; switch (jcc_op) { case JCC_BE: - tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT); + tcg_gen_mov_tl(cpu_tmp4, s->cc_srcT); gen_extu(size, cpu_tmp4); t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4, @@ -912,7 +916,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) case JCC_LE: cond = TCG_COND_LE; fast_jcc_l: - tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT); + tcg_gen_mov_tl(cpu_tmp4, s->cc_srcT); gen_exts(size, cpu_tmp4); t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true); cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4, @@ -1309,11 +1313,11 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) case OP_SUBL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_neg_tl(cpu_T0, cpu_T1); - tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0, + tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, cpu_A0, cpu_T0, s1->mem_index, ot | MO_LE); - tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1); + tcg_gen_sub_tl(cpu_T0, s1->cc_srcT, cpu_T1); } else { - tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0); + tcg_gen_mov_tl(s1->cc_srcT, cpu_T0); tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_st_rm_T0_A0(s1, ot, d); } @@ -1356,7 +1360,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) break; case OP_CMPL: tcg_gen_mov_tl(cpu_cc_src, cpu_T1); - tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0); + tcg_gen_mov_tl(s1->cc_srcT, cpu_T0); tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1); set_cc_op(s1, CC_OP_SUBB + ot); break; @@ -4823,7 +4827,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(ot, rm, cpu_T0); } } - gen_op_update_neg_cc(); + gen_op_update_neg_cc(s); set_cc_op(s, CC_OP_SUBB + ot); break; case 4: /* mul */ @@ -5283,7 +5287,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } } tcg_gen_mov_tl(cpu_cc_src, oldv); - tcg_gen_mov_tl(cpu_cc_srcT, cmpv); + tcg_gen_mov_tl(s->cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); tcg_temp_free(oldv); @@ -8463,7 +8467,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) cpu_tmp4 = tcg_temp_new(); cpu_ptr0 = tcg_temp_new_ptr(); cpu_ptr1 = tcg_temp_new_ptr(); - cpu_cc_srcT = tcg_temp_local_new(); + dc->cc_srcT = tcg_temp_local_new(); } static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu) From 6b672b5d6b14422c131969c5725f738751e12847 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:41:57 -0400 Subject: [PATCH 26/80] target/i386: move cpu_A0 to DisasContext Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 472 ++++++++++++++++++++-------------------- 1 file changed, 236 insertions(+), 236 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index e9f512472e..c6b1baab9d 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -72,7 +72,6 @@ //#define MACRO_TEST 1 /* global register indexes */ -static TCGv cpu_A0; static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2; static TCGv_i32 cpu_cc_op; static TCGv cpu_regs[CPU_NB_REGS]; @@ -138,6 +137,7 @@ typedef struct DisasContext { /* TCG local temps */ TCGv cc_srcT; + TCGv A0; sigjmp_buf jmpbuf; } DisasContext; @@ -395,9 +395,9 @@ static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg) static void gen_add_A0_im(DisasContext *s, int val) { - tcg_gen_addi_tl(cpu_A0, cpu_A0, val); + tcg_gen_addi_tl(s->A0, s->A0, val); if (!CODE64(s)) { - tcg_gen_ext32u_tl(cpu_A0, cpu_A0); + tcg_gen_ext32u_tl(s->A0, s->A0); } } @@ -431,7 +431,7 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) { if (d == OR_TMP0) { - gen_op_st_v(s, idx, cpu_T0, cpu_A0); + gen_op_st_v(s, idx, cpu_T0, s->A0); } else { gen_op_mov_reg_v(idx, d, cpu_T0); } @@ -453,7 +453,7 @@ static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, #ifdef TARGET_X86_64 case MO_64: if (ovr_seg < 0) { - tcg_gen_mov_tl(cpu_A0, a0); + tcg_gen_mov_tl(s->A0, a0); return; } break; @@ -464,14 +464,14 @@ static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, ovr_seg = def_seg; } if (ovr_seg < 0) { - tcg_gen_ext32u_tl(cpu_A0, a0); + tcg_gen_ext32u_tl(s->A0, a0); return; } break; case MO_16: /* 16 bit address */ - tcg_gen_ext16u_tl(cpu_A0, a0); - a0 = cpu_A0; + tcg_gen_ext16u_tl(s->A0, a0); + a0 = s->A0; if (ovr_seg < 0) { if (s->addseg) { ovr_seg = def_seg; @@ -488,13 +488,13 @@ static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, TCGv seg = cpu_seg_base[ovr_seg]; if (aflag == MO_64) { - tcg_gen_add_tl(cpu_A0, a0, seg); + tcg_gen_add_tl(s->A0, a0, seg); } else if (CODE64(s)) { - tcg_gen_ext32u_tl(cpu_A0, a0); - tcg_gen_add_tl(cpu_A0, cpu_A0, seg); + tcg_gen_ext32u_tl(s->A0, a0); + tcg_gen_add_tl(s->A0, s->A0, seg); } else { - tcg_gen_add_tl(cpu_A0, a0, seg); - tcg_gen_ext32u_tl(cpu_A0, cpu_A0); + tcg_gen_add_tl(s->A0, a0, seg); + tcg_gen_ext32u_tl(s->A0, s->A0); } } } @@ -640,9 +640,9 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, static inline void gen_movs(DisasContext *s, TCGMemOp ot) { gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); gen_string_movl_A0_EDI(s); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_ESI); gen_op_add_reg_T0(s->aflag, R_EDI); @@ -1072,7 +1072,7 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot) { gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); gen_string_movl_A0_EDI(s); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_EDI); } @@ -1080,7 +1080,7 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot) static inline void gen_lods(DisasContext *s, TCGMemOp ot) { gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); gen_op_mov_reg_v(ot, R_EAX, cpu_T0); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_ESI); @@ 
-1089,7 +1089,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) static inline void gen_scas(DisasContext *s, TCGMemOp ot) { gen_string_movl_A0_EDI(s); - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_op(s, OP_CMPL, ot, R_EAX); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_EDI); @@ -1098,7 +1098,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) static inline void gen_cmps(DisasContext *s, TCGMemOp ot) { gen_string_movl_A0_EDI(s); - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_string_movl_A0_ESI(s); gen_op(s, OP_CMPL, ot, OR_TMP0); gen_op_movl_T0_Dshift(ot); @@ -1128,11 +1128,11 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) /* Note: we must do this dummy write first to be restartable in case of page fault. */ tcg_gen_movi_tl(cpu_T0, 0); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_EDI); gen_bpt_io(s, cpu_tmp2_i32, ot); @@ -1147,7 +1147,7 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) gen_io_start(); } gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); @@ -1267,14 +1267,14 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) if (d != OR_TMP0) { gen_op_mov_v_reg(ot, cpu_T0, d); } else if (!(s1->prefix & PREFIX_LOCK)) { - gen_op_ld_v(s1, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s1, ot, cpu_T0, s1->A0); } switch(op) { case OP_ADCL: gen_compute_eflags_c(s1, cpu_tmp4); if (s1->prefix & PREFIX_LOCK) { tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1); - tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0, + tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T0, s1->mem_index, ot | MO_LE); } else { tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); @@ -1289,7 +1289,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) if (s1->prefix & PREFIX_LOCK) { tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4); tcg_gen_neg_tl(cpu_T0, cpu_T0); - tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0, + tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T0, s1->mem_index, ot | MO_LE); } else { tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1); @@ -1301,7 +1301,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) break; case OP_ADDL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1, + tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); @@ -1313,7 +1313,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) case OP_SUBL: if (s1->prefix & PREFIX_LOCK) { tcg_gen_neg_tl(cpu_T0, cpu_T1); - tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, cpu_A0, cpu_T0, + tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, cpu_T0, s1->mem_index, ot | MO_LE); tcg_gen_sub_tl(cpu_T0, s1->cc_srcT, cpu_T1); } else { @@ -1327,7 +1327,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) default: case OP_ANDL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1, + tcg_gen_atomic_and_fetch_tl(cpu_T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1); @@ 
-1338,7 +1338,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) break; case OP_ORL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1, + tcg_gen_atomic_or_fetch_tl(cpu_T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1); @@ -1349,7 +1349,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) break; case OP_XORL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1, + tcg_gen_atomic_xor_fetch_tl(cpu_T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1); @@ -1372,13 +1372,13 @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) { if (s1->prefix & PREFIX_LOCK) { tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1); - tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0, + tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T0, s1->mem_index, ot | MO_LE); } else { if (d != OR_TMP0) { gen_op_mov_v_reg(ot, cpu_T0, d); } else { - gen_op_ld_v(s1, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s1, ot, cpu_T0, s1->A0); } tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1)); gen_op_st_rm_T0_A0(s1, ot, d); @@ -1441,7 +1441,7 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_v_reg(ot, cpu_T0, op1); } @@ -1477,7 +1477,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, /* load */ if (op1 == OR_TMP0) - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); else gen_op_mov_v_reg(ot, cpu_T0, op1); @@ -1517,7 +1517,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_v_reg(ot, cpu_T0, op1); } @@ -1603,7 +1603,7 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_v_reg(ot, cpu_T0, op1); } @@ -1681,7 +1681,7 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* load */ if (op1 == OR_TMP0) - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); else gen_op_mov_v_reg(ot, cpu_T0, op1); @@ -1737,7 +1737,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_v_reg(ot, cpu_T0, op1); } @@ -2052,7 +2052,7 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, } /* Compute the address, with a minimum number of TCG ops. 
*/ -static TCGv gen_lea_modrm_1(AddressParts a) +static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a) { TCGv ea = NULL; @@ -2060,22 +2060,22 @@ static TCGv gen_lea_modrm_1(AddressParts a) if (a.scale == 0) { ea = cpu_regs[a.index]; } else { - tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale); - ea = cpu_A0; + tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale); + ea = s->A0; } if (a.base >= 0) { - tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]); - ea = cpu_A0; + tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]); + ea = s->A0; } } else if (a.base >= 0) { ea = cpu_regs[a.base]; } if (!ea) { - tcg_gen_movi_tl(cpu_A0, a.disp); - ea = cpu_A0; + tcg_gen_movi_tl(s->A0, a.disp); + ea = s->A0; } else if (a.disp != 0) { - tcg_gen_addi_tl(cpu_A0, ea, a.disp); - ea = cpu_A0; + tcg_gen_addi_tl(s->A0, ea, a.disp); + ea = s->A0; } return ea; @@ -2084,7 +2084,7 @@ static TCGv gen_lea_modrm_1(AddressParts a) static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) { AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(a); + TCGv ea = gen_lea_modrm_1(s, a); gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); } @@ -2097,7 +2097,7 @@ static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, TCGCond cond, TCGv_i64 bndv) { - TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm)); + TCGv ea = gen_lea_modrm_1(s, gen_lea_modrm_0(env, s, modrm)); tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea); if (!CODE64(s)) { @@ -2111,7 +2111,7 @@ static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, /* used for LEA and MOV AX, mem */ static void gen_add_A0_ds_seg(DisasContext *s) { - gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override); + gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override); } /* generate modrm memory load or store of 'reg'. 
TMP0 is used if reg == @@ -2138,9 +2138,9 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, if (is_store) { if (reg != OR_TMP0) gen_op_mov_v_reg(ot, cpu_T0, reg); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } else { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); if (reg != OR_TMP0) gen_op_mov_reg_v(ot, reg, cpu_T0); } @@ -2334,19 +2334,19 @@ static void gen_push_v(DisasContext *s, TCGv val) TCGMemOp d_ot = mo_pushpop(s, s->dflag); TCGMemOp a_ot = mo_stacksize(s); int size = 1 << d_ot; - TCGv new_esp = cpu_A0; + TCGv new_esp = s->A0; - tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size); + tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size); if (!CODE64(s)) { if (s->addseg) { new_esp = cpu_tmp4; - tcg_gen_mov_tl(new_esp, cpu_A0); + tcg_gen_mov_tl(new_esp, s->A0); } - gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); } - gen_op_st_v(s, d_ot, val, cpu_A0); + gen_op_st_v(s, d_ot, val, s->A0); gen_op_mov_reg_v(a_ot, R_ESP, new_esp); } @@ -2356,7 +2356,7 @@ static TCGMemOp gen_pop_T0(DisasContext *s) TCGMemOp d_ot = mo_pushpop(s, s->dflag); gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, d_ot, cpu_T0, s->A0); return d_ot; } @@ -2379,9 +2379,9 @@ static void gen_pusha(DisasContext *s) int i; for (i = 0; i < 8; i++) { - tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size); - gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1); - gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0); + tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size); + gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); + gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0); } gen_stack_update(s, -8 * size); @@ -2399,9 +2399,9 @@ static void gen_popa(DisasContext *s) if (7 - i == R_ESP) { continue; } - tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size); - gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0); + tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size); + gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); + gen_op_ld_v(s, d_ot, cpu_T0, s->A0); gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0); } @@ -2417,7 +2417,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) /* Push BP; compute FrameTemp into T1. */ tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size); gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1); - gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0); + gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0); level &= 31; if (level != 0) { @@ -2425,19 +2425,19 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) /* Copy level-1 pointers from the previous frame. */ for (i = 1; i < level; ++i) { - tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i); - gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0); + tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + gen_op_ld_v(s, d_ot, cpu_tmp0, s->A0); - tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i); - gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1); - gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0); + tcg_gen_subi_tl(s->A0, cpu_T1, size * i); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + gen_op_st_v(s, d_ot, cpu_tmp0, s->A0); } /* Push the current FrameTemp as the last level. 
*/ - tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level); - gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1); - gen_op_st_v(s, d_ot, cpu_T1, cpu_A0); + tcg_gen_subi_tl(s->A0, cpu_T1, size * level); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + gen_op_st_v(s, d_ot, cpu_T1, s->A0); } /* Copy the FrameTemp value to EBP. */ @@ -2454,7 +2454,7 @@ static void gen_leave(DisasContext *s) TCGMemOp a_ot = mo_stacksize(s); gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, d_ot, cpu_T0, s->A0); tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot); @@ -2633,22 +2633,22 @@ static void gen_jmp(DisasContext *s, target_ulong eip) static inline void gen_ldq_env_A0(DisasContext *s, int offset) { - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset); } static inline void gen_stq_env_A0(DisasContext *s, int offset) { tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); } static inline void gen_ldo_env_A0(DisasContext *s, int offset) { int mem_index = s->mem_index; - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ); + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, mem_index, MO_LEQ); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8); + tcg_gen_addi_tl(cpu_tmp0, s->A0, 8); tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); } @@ -2657,8 +2657,8 @@ static inline void gen_sto_env_A0(DisasContext *s, int offset) { int mem_index = s->mem_index; tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ); - tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8); + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, mem_index, MO_LEQ); + tcg_gen_addi_tl(cpu_tmp0, s->A0, 8); tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); } @@ -3128,7 +3128,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } else { tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); - gen_op_st_v(s, MO_32, cpu_T0, cpu_A0); + gen_op_st_v(s, MO_32, cpu_T0, s->A0); } break; case 0x6e: /* movd mm, ea */ @@ -3193,7 +3193,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, case 0x210: /* movss xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_32, cpu_T0, s->A0); tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); tcg_gen_movi_tl(cpu_T0, 0); tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1))); @@ -3380,7 +3380,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod != 3) { gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - gen_op_st_v(s, MO_32, cpu_T0, cpu_A0); + gen_op_st_v(s, MO_32, cpu_T0, s->A0); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)), @@ -3555,7 +3555,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if ((b >> 8) & 1) { gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0))); } else { - gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0); + 
gen_op_ld_v(s, MO_32, cpu_T0, s->A0); tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0))); } op2_offset = offsetof(CPUX86State,xmm_t0); @@ -3694,13 +3694,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset + offsetof(ZMMReg, ZMM_L(0))); break; case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ - tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0, + tcg_gen_qemu_ld_tl(cpu_tmp0, s->A0, s->mem_index, MO_LEUW); tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset + offsetof(ZMMReg, ZMM_W(0))); @@ -3789,11 +3789,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_lea_modrm(env, s, modrm); if ((b & 1) == 0) { - tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0, + tcg_gen_qemu_ld_tl(cpu_T0, s->A0, s->mem_index, ot | MO_BE); gen_op_mov_reg_v(ot, reg, cpu_T0); } else { - tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0, + tcg_gen_qemu_st_tl(cpu_regs[reg], s->A0, s->mem_index, ot | MO_BE); } break; @@ -3825,23 +3825,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); /* Extract START, and shift the operand. Shifts larger than operand size get zeros. */ - tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]); - tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0); + tcg_gen_ext8u_tl(s->A0, cpu_regs[s->vex_v]); + tcg_gen_shr_tl(cpu_T0, cpu_T0, s->A0); bound = tcg_const_tl(ot == MO_64 ? 63 : 31); zero = tcg_const_tl(0); - tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound, + tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, s->A0, bound, cpu_T0, zero); tcg_temp_free(zero); /* Extract the LEN into a mask. Lengths larger than operand size get all ones. 
*/ - tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8); - tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound, - cpu_A0, bound); + tcg_gen_extract_tl(s->A0, cpu_regs[s->vex_v], 8, 8); + tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, + s->A0, bound); tcg_temp_free(bound); tcg_gen_movi_tl(cpu_T1, 1); - tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0); + tcg_gen_shl_tl(cpu_T1, cpu_T1, s->A0); tcg_gen_subi_tl(cpu_T1, cpu_T1, 1); tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1); @@ -3870,9 +3870,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, bound, bound, cpu_T1); tcg_temp_free(bound); } - tcg_gen_movi_tl(cpu_A0, -1); - tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1); - tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0); + tcg_gen_movi_tl(s->A0, -1); + tcg_gen_shl_tl(s->A0, s->A0, cpu_T1); + tcg_gen_andc_tl(cpu_T0, cpu_T0, s->A0); gen_op_mov_reg_v(ot, reg, cpu_T0); gen_op_update1_cc(); set_cc_op(s, CC_OP_BMILGB + ot); @@ -4124,7 +4124,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { gen_op_mov_reg_v(ot, rm, cpu_T0); } else { - tcg_gen_qemu_st_tl(cpu_T0, cpu_A0, + tcg_gen_qemu_st_tl(cpu_T0, s->A0, s->mem_index, MO_UB); } break; @@ -4134,7 +4134,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { gen_op_mov_reg_v(ot, rm, cpu_T0); } else { - tcg_gen_qemu_st_tl(cpu_T0, cpu_A0, + tcg_gen_qemu_st_tl(cpu_T0, s->A0, s->mem_index, MO_LEUW); } break; @@ -4146,7 +4146,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32); } else { - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); } } else { /* pextrq */ @@ -4157,7 +4157,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64); } else { - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); } #else @@ -4171,7 +4171,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { gen_op_mov_reg_v(ot, rm, cpu_T0); } else { - tcg_gen_qemu_st_tl(cpu_T0, cpu_A0, + tcg_gen_qemu_st_tl(cpu_T0, s->A0, s->mem_index, MO_LEUL); } break; @@ -4179,7 +4179,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { gen_op_mov_v_reg(MO_32, cpu_T0, rm); } else { - tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0, + tcg_gen_qemu_ld_tl(cpu_T0, s->A0, s->mem_index, MO_UB); } tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State, @@ -4191,7 +4191,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, offsetof(CPUX86State,xmm_regs[rm] .ZMM_L((val >> 6) & 3))); } else { - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); } tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, @@ -4219,7 +4219,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]); } else { - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); } tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, @@ -4230,7 +4230,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (mod == 3) { gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm); } else { - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); } tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, @@ -4360,7 +4360,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, switch (sz) { case 2: /* 32 
bit access */ - gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_32, cpu_T0, s->A0); tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0))); break; @@ -4426,15 +4426,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* maskmov : we must prepare A0 */ if (mod != 3) goto illegal_op; - tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]); - gen_extu(s->aflag, cpu_A0); + tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]); + gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); /* XXX: introduce a new table? */ sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; - sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0); + sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, s->A0); break; default: tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); @@ -4673,7 +4673,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { @@ -4760,7 +4760,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* For those below that handle locked memory, don't load here. */ if (!(s->prefix & PREFIX_LOCK) || op != 2) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); @@ -4779,12 +4779,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } tcg_gen_movi_tl(cpu_T0, ~0); - tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0, + tcg_gen_atomic_xor_fetch_tl(cpu_T0, s->A0, cpu_T0, s->mem_index, ot | MO_LE); } else { tcg_gen_not_tl(cpu_T0, cpu_T0); if (mod != 3) { - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } @@ -4802,7 +4802,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) t0 = tcg_temp_local_new(); label1 = gen_new_label(); - tcg_gen_mov_tl(a0, cpu_A0); + tcg_gen_mov_tl(a0, s->A0); tcg_gen_mov_tl(t0, cpu_T0); gen_set_label(label1); @@ -4822,7 +4822,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { tcg_gen_neg_tl(cpu_T0, cpu_T0); if (mod != 3) { - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } @@ -5001,7 +5001,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_lea_modrm(env, s, modrm); if (op >= 2 && op != 3 && op != 5) - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } @@ -5034,9 +5034,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_jr(s, cpu_T0); break; case 3: /* lcall Ev */ - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_add_A0_im(s, 1 << ot); - gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_16, cpu_T0, s->A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); @@ -5061,9 +5061,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_jr(s, cpu_T0); break; case 5: /* ljmp Ev */ - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_add_A0_im(s, 1 << ot); - gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_16, cpu_T0, s->A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); @@ -5225,13 +5225,13 @@ static target_ulong disas_insn(DisasContext *s, 
CPUState *cpu) } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0, + tcg_gen_atomic_fetch_add_tl(cpu_T1, s->A0, cpu_T0, s->mem_index, ot | MO_LE); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); } else { - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } gen_op_mov_reg_v(ot, reg, cpu_T1); } @@ -5258,7 +5258,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_lea_modrm(env, s, modrm); - tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv, + tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, R_EAX, oldv); } else { @@ -5267,7 +5267,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(ot, oldv, rm); } else { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, oldv, cpu_A0); + gen_op_ld_v(s, ot, oldv, s->A0); rm = 0; /* avoid warning */ } gen_extu(ot, oldv); @@ -5282,7 +5282,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ - gen_op_st_v(s, ot, newv, cpu_A0); + gen_op_st_v(s, ot, newv, s->A0); gen_op_mov_reg_v(ot, R_EAX, oldv); } } @@ -5306,9 +5306,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) { - gen_helper_cmpxchg16b(cpu_env, cpu_A0); + gen_helper_cmpxchg16b(cpu_env, s->A0); } else { - gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0); + gen_helper_cmpxchg16b_unlocked(cpu_env, s->A0); } } else #endif @@ -5317,9 +5317,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) { - gen_helper_cmpxchg8b(cpu_env, cpu_A0); + gen_helper_cmpxchg8b(cpu_env, s->A0); } else { - gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0); + gen_helper_cmpxchg8b_unlocked(cpu_env, s->A0); } } set_cc_op(s, CC_OP_EFLAGS); @@ -5453,7 +5453,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T0, val); if (mod != 3) { - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0); } @@ -5540,7 +5540,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, s_ot, cpu_T0, s->A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } } @@ -5554,9 +5554,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; { AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(a); + TCGv ea = gen_lea_modrm_1(s, a); gen_lea_v_seg(s, s->aflag, ea, -1, -1); - gen_op_mov_reg_v(dflag, reg, cpu_A0); + gen_op_mov_reg_v(dflag, reg, s->A0); } break; @@ -5578,24 +5578,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) offset_addr = insn_get(env, s, s->aflag); break; } - tcg_gen_movi_tl(cpu_A0, offset_addr); + tcg_gen_movi_tl(s->A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); gen_op_mov_reg_v(ot, R_EAX, cpu_T0); } 
else { gen_op_mov_v_reg(ot, cpu_T0, R_EAX); - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } } break; case 0xd7: /* xlat */ - tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]); + tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]); tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]); - tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0); - gen_extu(s->aflag, cpu_A0); + tcg_gen_add_tl(s->A0, s->A0, cpu_T0); + gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); - gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_8, cpu_T0, s->A0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xb0 ... 0xb7: /* mov R, Ib */ @@ -5646,7 +5646,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); gen_op_mov_v_reg(ot, cpu_T0, reg); /* for xchg, lock is implicit */ - tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0, + tcg_gen_atomic_xchg_tl(cpu_T1, s->A0, cpu_T0, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, reg, cpu_T1); } @@ -5675,10 +5675,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, cpu_T1, cpu_A0); + gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ - gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_16, cpu_T0, s->A0); gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_v(ot, reg, cpu_T1); @@ -5798,23 +5798,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(op >> 4) { case 0: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); break; case 1: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; case 2: - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); break; case 3: default: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LESW); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; @@ -5837,23 +5837,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0: switch(op >> 4) { case 0: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); break; case 1: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; case 2: - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); break; case 3: default: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LESW); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; @@ -5864,18 +5864,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(op >> 4) { case 1: gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); - 
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; } @@ -5885,23 +5885,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(op >> 4) { case 0: gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 1: gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; } @@ -5911,53 +5911,53 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } break; case 0x0c: /* fldenv mem */ - gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); + gen_helper_fldenv(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x0d: /* fldcw mem */ - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUW); gen_helper_fldcw(cpu_env, cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ - gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); + gen_helper_fstenv(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; case 0x1d: /* fldt mem */ - gen_helper_fldt_ST0(cpu_env, cpu_A0); + gen_helper_fldt_ST0(cpu_env, s->A0); break; case 0x1f: /* fstpt mem */ - gen_helper_fstt_ST0(cpu_env, cpu_A0); + gen_helper_fstt_ST0(cpu_env, s->A0); gen_helper_fpop(cpu_env); break; case 0x2c: /* frstor mem */ - gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); + gen_helper_frstor(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x2e: /* fnsave mem */ - gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); + gen_helper_fsave(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, + tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; case 0x3c: /* fbld */ - gen_helper_fbld_ST0(cpu_env, cpu_A0); + gen_helper_fbld_ST0(cpu_env, s->A0); break; case 0x3e: /* fbstp */ - gen_helper_fbst_ST0(cpu_env, cpu_A0); + gen_helper_fbst_ST0(cpu_env, s->A0); gen_helper_fpop(cpu_env); break; case 0x3d: /* fildll */ - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fpop(cpu_env); break; default: @@ -6471,13 +6471,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { gen_stack_A0(s); /* pop offset */ - gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); + gen_op_ld_v(s, dflag, cpu_T0, s->A0); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_v(cpu_T0); /* pop selector */ gen_add_A0_im(s, 1 << dflag); - gen_op_ld_v(s, dflag, cpu_T0, 
cpu_A0); + gen_op_ld_v(s, dflag, cpu_T0, s->A0); gen_op_movl_seg_T0_vm(R_CS); /* add stack offset */ gen_stack_update(s, val + (2 << dflag)); @@ -6732,7 +6732,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->rip_offset = 1; gen_lea_modrm(env, s, modrm); if (!(s->prefix & PREFIX_LOCK)) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); @@ -6768,10 +6768,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_exts(ot, cpu_T1); tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); - tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0); - gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); + tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), cpu_tmp0); + gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); @@ -6785,20 +6785,20 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0: /* bt */ /* Needs no atomic ops; we surpressed the normal memory load for LOCK above so do it now. */ - gen_op_ld_v(s, ot, cpu_T0, cpu_A0); + gen_op_ld_v(s, ot, cpu_T0, s->A0); break; case 1: /* bts */ - tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0, + tcg_gen_atomic_fetch_or_tl(cpu_T0, s->A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; case 2: /* btr */ tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); - tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0, + tcg_gen_atomic_fetch_and_tl(cpu_T0, s->A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; default: case 3: /* btc */ - tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0, + tcg_gen_atomic_fetch_xor_tl(cpu_T0, s->A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; } @@ -6822,7 +6822,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (op != 0) { if (mod != 3) { - gen_op_st_v(s, ot, cpu_T0, cpu_A0); + gen_op_st_v(s, ot, cpu_T0, s->A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } @@ -7051,9 +7051,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); if (ot == MO_16) { - gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32); + gen_helper_boundw(cpu_env, s->A0, cpu_tmp2_i32); } else { - gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32); + gen_helper_boundl(cpu_env, s->A0, cpu_tmp2_i32); } break; case 0x1c8 ... 
0x1cf: /* bswap reg */ @@ -7293,13 +7293,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.limit)); - gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); + gen_op_st_v(s, MO_16, cpu_T0, s->A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } - gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); + gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, s->A0); break; case 0xc8: /* monitor */ @@ -7308,10 +7308,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); - tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]); - gen_extu(s->aflag, cpu_A0); + tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); + gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); - gen_helper_monitor(cpu_env, cpu_A0); + gen_helper_monitor(cpu_env, s->A0); break; case 0xc9: /* mwait */ @@ -7348,13 +7348,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit)); - gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); + gen_op_st_v(s, MO_16, cpu_T0, s->A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } - gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); + gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, s->A0); break; case 0xd0: /* xgetbv */ @@ -7498,9 +7498,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); + gen_op_ld_v(s, MO_16, cpu_T1, s->A0); gen_add_A0_im(s, 2); - gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); + gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } @@ -7515,9 +7515,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); + gen_op_ld_v(s, MO_16, cpu_T1, s->A0); gen_add_A0_im(s, 2); - gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); + gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } @@ -7573,7 +7573,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_lea_modrm(env, s, modrm); - gen_helper_invlpg(cpu_env, cpu_A0); + gen_helper_invlpg(cpu_env, s->A0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; @@ -7646,7 +7646,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0); + gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, s->A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } } else @@ -7667,9 +7667,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = modrm & 7; if (mod != 3) { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, t0, cpu_A0); + gen_op_ld_v(s, ot, t0, s->A0); a0 = tcg_temp_local_new(); - tcg_gen_mov_tl(a0, cpu_A0); + tcg_gen_mov_tl(a0, s->A0); } else { gen_op_mov_v_reg(ot, t0, rm); a0 = NULL; @@ -7785,16 +7785,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) 
} else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { - tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, + tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEQ); - tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); - tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, + tcg_gen_addi_tl(s->A0, s->A0, 8); + tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, s->mem_index, MO_LEQ); } else { - tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, + tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEUL); - tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); - tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, + tcg_gen_addi_tl(s->A0, s->A0, 4); + tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, s->mem_index, MO_LEUL); } /* bnd registers are now in-use */ @@ -7810,22 +7810,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if (a.base >= 0) { - tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); + tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp); } else { - tcg_gen_movi_tl(cpu_A0, 0); + tcg_gen_movi_tl(s->A0, 0); } - gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); + gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); } if (CODE64(s)) { - gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0); + gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, cpu_T0); tcg_gen_ld_i64(cpu_bndu[reg], cpu_env, offsetof(CPUX86State, mmx_t0.MMX_Q(0))); } else { - gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0); + gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, cpu_T0); tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]); tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32); } @@ -7859,11 +7859,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* rip-relative generates #ud */ goto illegal_op; } - tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a)); + tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a)); if (!CODE64(s)) { - tcg_gen_ext32u_tl(cpu_A0, cpu_A0); + tcg_gen_ext32u_tl(s->A0, s->A0); } - tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0); + tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0); /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); break; @@ -7892,16 +7892,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { - tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, + tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEQ); - tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); - tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, + tcg_gen_addi_tl(s->A0, s->A0, 8); + tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0, s->mem_index, MO_LEQ); } else { - tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, + tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEUL); - tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); - tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, + tcg_gen_addi_tl(s->A0, s->A0, 4); + tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0, s->mem_index, MO_LEUL); } } @@ -7915,21 +7915,21 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } if (a.base >= 0) { - tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); + tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp); } else { - tcg_gen_movi_tl(cpu_A0, 0); + tcg_gen_movi_tl(s->A0, 0); } - gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); + gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); } if (CODE64(s)) { - gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0, + gen_helper_bndstx64(cpu_env, 
s->A0, cpu_T0,
                                    cpu_bndl[reg], cpu_bndu[reg]);
            } else {
-                gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
+                gen_helper_bndstx32(cpu_env, s->A0, cpu_T0,
                                    cpu_bndl[reg], cpu_bndu[reg]);
            }
        }
@@ -8069,7 +8069,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
            break;
        }
        gen_lea_modrm(env, s, modrm);
-        gen_helper_fxsave(cpu_env, cpu_A0);
+        gen_helper_fxsave(cpu_env, s->A0);
        break;

    CASE_MODRM_MEM_OP(1): /* fxrstor */
@@ -8082,7 +8082,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
            break;
        }
        gen_lea_modrm(env, s, modrm);
-        gen_helper_fxrstor(cpu_env, cpu_A0);
+        gen_helper_fxrstor(cpu_env, s->A0);
        break;

    CASE_MODRM_MEM_OP(2): /* ldmxcsr */
@@ -8094,7 +8094,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
            break;
        }
        gen_lea_modrm(env, s, modrm);
-        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
+        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL);
        gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
        break;

@@ -8108,7 +8108,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
        }
        gen_lea_modrm(env, s, modrm);
        tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
-        gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+        gen_op_st_v(s, MO_32, cpu_T0, s->A0);
        break;

    CASE_MODRM_MEM_OP(4): /* xsave */
@@ -8120,7 +8120,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
        gen_lea_modrm(env, s, modrm);
        tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                              cpu_regs[R_EDX]);
-        gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
+        gen_helper_xsave(cpu_env, s->A0, cpu_tmp1_i64);
        break;

    CASE_MODRM_MEM_OP(5): /* xrstor */
@@ -8132,7 +8132,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
        gen_lea_modrm(env, s, modrm);
        tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                              cpu_regs[R_EDX]);
-        gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
+        gen_helper_xrstor(cpu_env, s->A0, cpu_tmp1_i64);
        /* XRSTOR is how MPX is enabled, which changes how
           we translate.  Thus we need to end the TB.  */
        gen_update_cc_op(s);
@@ -8160,7 +8160,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
-            gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
+            gen_helper_xsaveopt(cpu_env, s->A0, cpu_tmp1_i64);
        }
        break;

@@ -8458,7 +8458,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
-    cpu_A0 = tcg_temp_new();
+    dc->A0 = tcg_temp_new();
    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();

From c66f97273f677d76afaaeb0e688eb08499701b1b Mon Sep 17 00:00:00 2001
From: "Emilio G. Cota"
Date: Tue, 11 Sep 2018 14:48:41 -0400
Subject: [PATCH 27/80] target/i386: move cpu_T0 to DisasContext

Signed-off-by: Emilio G. Cota
Reviewed-by: Richard Henderson
Signed-off-by: Paolo Bonzini
---
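A note for readers of this series (editorial, not part of the commit):
patches 26 and 27 apply the same mechanical transformation, in which a
file-scope TCG temporary (cpu_A0, cpu_T0) becomes a DisasContext field
(s->A0, s->T0), so every helper reaches its scratch values through the
per-translation context instead of through a global. The toy program
below sketches that pattern in plain C; all names in it (ToyContext,
toy_load, toy_add) are invented for illustration and none of it is
QEMU code.

#include <stdio.h>

/* Before: one file-scope scratch shared by every caller, e.g.
 *     static long T0;
 * which is not re-entrant. After: the scratch lives in a context
 * passed to each helper, standing in for DisasContext::T0. */
typedef struct ToyContext {
    long T0;   /* per-context scratch value */
} ToyContext;

static void toy_load(ToyContext *s, long v)
{
    s->T0 = v;              /* was: T0 = v; with the global */
}

static long toy_add(ToyContext *s, long v)
{
    s->T0 += v;             /* was: T0 += v; with the global */
    return s->T0;
}

int main(void)
{
    ToyContext a = { 0 }, b = { 0 };   /* two independent "translators" */

    toy_load(&a, 1);
    toy_load(&b, 100);
    /* With a shared global, the second toy_load would clobber the
     * first; with per-context state this prints "3 102". */
    printf("%ld ", toy_add(&a, 2));
    printf("%ld\n", toy_add(&b, 2));
    return 0;
}

The usual payoff of this kind of change is re-entrancy: once no
translation state is global, concurrent translators cannot trample each
other's temporaries. The commit message above does not spell that out,
so take it as an inference rather than the author's stated motivation.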
 target/i386/translate.c | 1174 ++++++++++++++++++++-------------------
 1 file changed, 594 insertions(+), 580 deletions(-)

diff --git a/target/i386/translate.c b/target/i386/translate.c
index c6b1baab9d..73fd7e5b9a 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -79,7 +79,7 @@ static TCGv cpu_seg_base[6];
 static TCGv_i64 cpu_bndl[4];
 static TCGv_i64 cpu_bndu[4];
 /* local temps */
-static TCGv cpu_T0, cpu_T1;
+static TCGv cpu_T1;
 /* local register indexes (only used inside old micro ops) */
 static TCGv cpu_tmp0, cpu_tmp4;
 static TCGv_ptr cpu_ptr0, cpu_ptr1;
@@ -138,6 +138,7 @@ typedef struct DisasContext {
     /* TCG local temps */
     TCGv cc_srcT;
     TCGv A0;
+    TCGv T0;

     sigjmp_buf jmpbuf;
 } DisasContext;
@@ -412,9 +413,9 @@ static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
     gen_op_mov_reg_v(size, reg, cpu_tmp0);
 }

-static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
+static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg)
 {
-    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
+    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], s->T0);
     gen_op_mov_reg_v(size, reg, cpu_tmp0);
 }

@@ -431,9 +432,9 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)

 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
 {
     if (d == OR_TMP0) {
-        gen_op_st_v(s, idx, cpu_T0, s->A0);
+        gen_op_st_v(s, idx, s->T0, s->A0);
     } else {
-        gen_op_mov_reg_v(idx, d, cpu_T0);
+        gen_op_mov_reg_v(idx, d, s->T0);
     }
 }

@@ -509,10 +510,10 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
     gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
 }

-static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
+static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot)
 {
-    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
-    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
+    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
+    tcg_gen_shli_tl(s->T0, s->T0, ot);
 };

 static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
@@ -610,7 +611,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
     target_ulong next_eip;

     if (s->pe && (s->cpl > s->iopl || s->vm86)) {
-        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
         switch (ot) {
         case MO_8:
             gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
@@ -630,7 +631,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
         gen_jmp_im(cur_eip);
         svm_flags |= (1 << (4 + ot));
         next_eip = s->pc - s->cs_base;
-        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
         gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                 tcg_const_i32(svm_flags),
                                 tcg_const_i32(next_eip - cur_eip));
@@ -640,41 +641,41 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
 {
     gen_string_movl_A0_ESI(s);
-    gen_op_ld_v(s, ot, cpu_T0, s->A0);
+    gen_op_ld_v(s, ot, s->T0, s->A0);
     gen_string_movl_A0_EDI(s);
-    gen_op_st_v(s, ot, cpu_T0, s->A0);
-    gen_op_movl_T0_Dshift(ot);
-    gen_op_add_reg_T0(s->aflag, R_ESI);
-    gen_op_add_reg_T0(s->aflag, R_EDI);
+    gen_op_st_v(s, ot, s->T0, s->A0);
+    gen_op_movl_T0_Dshift(s, ot);
+    gen_op_add_reg_T0(s, s->aflag, R_ESI);
+    gen_op_add_reg_T0(s, s->aflag, R_EDI);
 }

-static void gen_op_update1_cc(void)
+static void gen_op_update1_cc(DisasContext *s)
 {
-    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
 }

-static void gen_op_update2_cc(void) +static void gen_op_update2_cc(DisasContext *s) { tcg_gen_mov_tl(cpu_cc_src, cpu_T1); - tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); } -static void gen_op_update3_cc(TCGv reg) +static void gen_op_update3_cc(DisasContext *s, TCGv reg) { tcg_gen_mov_tl(cpu_cc_src2, reg); tcg_gen_mov_tl(cpu_cc_src, cpu_T1); - tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); } -static inline void gen_op_testl_T0_T1_cc(void) +static inline void gen_op_testl_T0_T1_cc(DisasContext *s) { - tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1); + tcg_gen_and_tl(cpu_cc_dst, s->T0, cpu_T1); } static void gen_op_update_neg_cc(DisasContext *s) { - tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); - tcg_gen_neg_tl(cpu_cc_src, cpu_T0); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); + tcg_gen_neg_tl(cpu_cc_src, s->T0); tcg_gen_movi_tl(s->cc_srcT, 0); } @@ -1022,11 +1023,11 @@ static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) value 'b'. In the fast case, T0 is guaranted not to be used. */ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) { - CCPrepare cc = gen_prepare_cc(s, b, cpu_T0); + CCPrepare cc = gen_prepare_cc(s, b, s->T0); if (cc.mask != -1) { - tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask); - cc.reg = cpu_T0; + tcg_gen_andi_tl(s->T0, cc.reg, cc.mask); + cc.reg = s->T0; } if (cc.use_reg2) { tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); @@ -1040,12 +1041,12 @@ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) A translation block must end soon. */ static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) { - CCPrepare cc = gen_prepare_cc(s, b, cpu_T0); + CCPrepare cc = gen_prepare_cc(s, b, s->T0); gen_update_cc_op(s); if (cc.mask != -1) { - tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask); - cc.reg = cpu_T0; + tcg_gen_andi_tl(s->T0, cc.reg, cc.mask); + cc.reg = s->T0; } set_cc_op(s, CC_OP_DYNAMIC); if (cc.use_reg2) { @@ -1070,20 +1071,20 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) static inline void gen_stos(DisasContext *s, TCGMemOp ot) { - gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); + gen_op_mov_v_reg(MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); - gen_op_st_v(s, ot, cpu_T0, s->A0); - gen_op_movl_T0_Dshift(ot); - gen_op_add_reg_T0(s->aflag, R_EDI); + gen_op_st_v(s, ot, s->T0, s->A0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_EDI); } static inline void gen_lods(DisasContext *s, TCGMemOp ot) { gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, cpu_T0, s->A0); - gen_op_mov_reg_v(ot, R_EAX, cpu_T0); - gen_op_movl_T0_Dshift(ot); - gen_op_add_reg_T0(s->aflag, R_ESI); + gen_op_ld_v(s, ot, s->T0, s->A0); + gen_op_mov_reg_v(ot, R_EAX, s->T0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_ESI); } static inline void gen_scas(DisasContext *s, TCGMemOp ot) @@ -1091,8 +1092,8 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_op(s, OP_CMPL, ot, R_EAX); - gen_op_movl_T0_Dshift(ot); - gen_op_add_reg_T0(s->aflag, R_EDI); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_EDI); } static inline void gen_cmps(DisasContext *s, TCGMemOp ot) @@ -1101,9 +1102,9 @@ static inline void gen_cmps(DisasContext *s, TCGMemOp ot) gen_op_ld_v(s, ot, cpu_T1, s->A0); gen_string_movl_A0_ESI(s); gen_op(s, OP_CMPL, ot, OR_TMP0); - gen_op_movl_T0_Dshift(ot); - gen_op_add_reg_T0(s->aflag, R_ESI); - gen_op_add_reg_T0(s->aflag, R_EDI); + gen_op_movl_T0_Dshift(s, ot); + 
gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg_T0(s, s->aflag, R_EDI); } static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) @@ -1127,14 +1128,14 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) gen_string_movl_A0_EDI(s); /* Note: we must do this dummy write first to be restartable in case of page fault. */ - tcg_gen_movi_tl(cpu_T0, 0); - gen_op_st_v(s, ot, cpu_T0, s->A0); + tcg_gen_movi_tl(s->T0, 0); + gen_op_st_v(s, ot, s->T0, s->A0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); - gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32); - gen_op_st_v(s, ot, cpu_T0, s->A0); - gen_op_movl_T0_Dshift(ot); - gen_op_add_reg_T0(s->aflag, R_EDI); + gen_helper_in_func(ot, s->T0, cpu_tmp2_i32); + gen_op_st_v(s, ot, s->T0, s->A0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_EDI); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -1147,14 +1148,14 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) gen_io_start(); } gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T0); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); - gen_op_movl_T0_Dshift(ot); - gen_op_add_reg_T0(s->aflag, R_ESI); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_ESI); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -1265,103 +1266,103 @@ static void gen_helper_fp_arith_STN_ST0(int op, int opreg) static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) { if (d != OR_TMP0) { - gen_op_mov_v_reg(ot, cpu_T0, d); + gen_op_mov_v_reg(ot, s1->T0, d); } else if (!(s1->prefix & PREFIX_LOCK)) { - gen_op_ld_v(s1, ot, cpu_T0, s1->A0); + gen_op_ld_v(s1, ot, s1->T0, s1->A0); } switch(op) { case OP_ADCL: gen_compute_eflags_c(s1, cpu_tmp4); if (s1->prefix & PREFIX_LOCK) { - tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1); - tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T0, + tcg_gen_add_tl(s1->T0, cpu_tmp4, cpu_T1); + tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { - tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); - tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4); + tcg_gen_add_tl(s1->T0, s1->T0, cpu_T1); + tcg_gen_add_tl(s1->T0, s1->T0, cpu_tmp4); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update3_cc(cpu_tmp4); + gen_op_update3_cc(s1, cpu_tmp4); set_cc_op(s1, CC_OP_ADCB + ot); break; case OP_SBBL: gen_compute_eflags_c(s1, cpu_tmp4); if (s1->prefix & PREFIX_LOCK) { - tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4); - tcg_gen_neg_tl(cpu_T0, cpu_T0); - tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T0, + tcg_gen_add_tl(s1->T0, cpu_T1, cpu_tmp4); + tcg_gen_neg_tl(s1->T0, s1->T0); + tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { - tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1); - tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4); + tcg_gen_sub_tl(s1->T0, s1->T0, cpu_T1); + tcg_gen_sub_tl(s1->T0, s1->T0, cpu_tmp4); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update3_cc(cpu_tmp4); + gen_op_update3_cc(s1, cpu_tmp4); set_cc_op(s1, CC_OP_SBBB + ot); break; case OP_ADDL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T1, + tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { - 
tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_add_tl(s1->T0, s1->T0, cpu_T1); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update2_cc(); + gen_op_update2_cc(s1); set_cc_op(s1, CC_OP_ADDB + ot); break; case OP_SUBL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_neg_tl(cpu_T0, cpu_T1); - tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, cpu_T0, + tcg_gen_neg_tl(s1->T0, cpu_T1); + tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); - tcg_gen_sub_tl(cpu_T0, s1->cc_srcT, cpu_T1); + tcg_gen_sub_tl(s1->T0, s1->cc_srcT, cpu_T1); } else { - tcg_gen_mov_tl(s1->cc_srcT, cpu_T0); - tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_mov_tl(s1->cc_srcT, s1->T0); + tcg_gen_sub_tl(s1->T0, s1->T0, cpu_T1); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update2_cc(); + gen_op_update2_cc(s1); set_cc_op(s1, CC_OP_SUBB + ot); break; default: case OP_ANDL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_and_fetch_tl(cpu_T0, s1->A0, cpu_T1, + tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { - tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_and_tl(s1->T0, s1->T0, cpu_T1); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update1_cc(); + gen_op_update1_cc(s1); set_cc_op(s1, CC_OP_LOGICB + ot); break; case OP_ORL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_or_fetch_tl(cpu_T0, s1->A0, cpu_T1, + tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { - tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_or_tl(s1->T0, s1->T0, cpu_T1); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update1_cc(); + gen_op_update1_cc(s1); set_cc_op(s1, CC_OP_LOGICB + ot); break; case OP_XORL: if (s1->prefix & PREFIX_LOCK) { - tcg_gen_atomic_xor_fetch_tl(cpu_T0, s1->A0, cpu_T1, + tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, cpu_T1, s1->mem_index, ot | MO_LE); } else { - tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_xor_tl(s1->T0, s1->T0, cpu_T1); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update1_cc(); + gen_op_update1_cc(s1); set_cc_op(s1, CC_OP_LOGICB + ot); break; case OP_CMPL: tcg_gen_mov_tl(cpu_cc_src, cpu_T1); - tcg_gen_mov_tl(s1->cc_srcT, cpu_T0); - tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1); + tcg_gen_mov_tl(s1->cc_srcT, s1->T0); + tcg_gen_sub_tl(cpu_cc_dst, s1->T0, cpu_T1); set_cc_op(s1, CC_OP_SUBB + ot); break; } @@ -1371,21 +1372,21 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) { if (s1->prefix & PREFIX_LOCK) { - tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1); - tcg_gen_atomic_add_fetch_tl(cpu_T0, s1->A0, cpu_T0, + tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1); + tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { if (d != OR_TMP0) { - gen_op_mov_v_reg(ot, cpu_T0, d); + gen_op_mov_v_reg(ot, s1->T0, d); } else { - gen_op_ld_v(s1, ot, cpu_T0, s1->A0); + gen_op_ld_v(s1, ot, s1->T0, s1->A0); } - tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1)); + tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1)); gen_op_st_rm_T0_A0(s1, ot, d); } gen_compute_eflags_c(s1, cpu_cc_src); - tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); + tcg_gen_mov_tl(cpu_cc_dst, s1->T0); set_cc_op(s1, (c > 0 ? 
CC_OP_INCB : CC_OP_DECB) + ot); } @@ -1441,9 +1442,9 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, cpu_T0, op1); + gen_op_mov_v_reg(ot, s->T0, op1); } tcg_gen_andi_tl(cpu_T1, cpu_T1, mask); @@ -1451,23 +1452,23 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, if (is_right) { if (is_arith) { - gen_exts(ot, cpu_T0); - tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0); - tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1); + gen_exts(ot, s->T0); + tcg_gen_sar_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_sar_tl(s->T0, s->T0, cpu_T1); } else { - gen_extu(ot, cpu_T0); - tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0); - tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1); + gen_extu(ot, s->T0); + tcg_gen_shr_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shr_tl(s->T0, s->T0, cpu_T1); } } else { - tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0); - tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_shl_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shl_tl(s->T0, s->T0, cpu_T1); } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); - gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right); + gen_shift_flags(s, ot, s->T0, cpu_tmp0, cpu_T1, is_right); } static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, @@ -1477,25 +1478,25 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, /* load */ if (op1 == OR_TMP0) - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); else - gen_op_mov_v_reg(ot, cpu_T0, op1); + gen_op_mov_v_reg(ot, s->T0, op1); op2 &= mask; if (op2 != 0) { if (is_right) { if (is_arith) { - gen_exts(ot, cpu_T0); - tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1); - tcg_gen_sari_tl(cpu_T0, cpu_T0, op2); + gen_exts(ot, s->T0); + tcg_gen_sari_tl(cpu_tmp4, s->T0, op2 - 1); + tcg_gen_sari_tl(s->T0, s->T0, op2); } else { - gen_extu(ot, cpu_T0); - tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1); - tcg_gen_shri_tl(cpu_T0, cpu_T0, op2); + gen_extu(ot, s->T0); + tcg_gen_shri_tl(cpu_tmp4, s->T0, op2 - 1); + tcg_gen_shri_tl(s->T0, s->T0, op2); } } else { - tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1); - tcg_gen_shli_tl(cpu_T0, cpu_T0, op2); + tcg_gen_shli_tl(cpu_tmp4, s->T0, op2 - 1); + tcg_gen_shli_tl(s->T0, s->T0, op2); } } @@ -1505,7 +1506,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, /* update eflags if non zero shift */ if (op2 != 0) { tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); - tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); } } @@ -1517,9 +1518,9 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, cpu_T0, op1); + gen_op_mov_v_reg(ot, s->T0, op1); } tcg_gen_andi_tl(cpu_T1, cpu_T1, mask); @@ -1527,31 +1528,31 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) switch (ot) { case MO_8: /* Replicate the 8-bit input so that a 32-bit rotate works. */ - tcg_gen_ext8u_tl(cpu_T0, cpu_T0); - tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101); + tcg_gen_ext8u_tl(s->T0, s->T0); + tcg_gen_muli_tl(s->T0, s->T0, 0x01010101); goto do_long; case MO_16: /* Replicate the 16-bit input so that a 32-bit rotate works. 
*/ - tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16); + tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16); goto do_long; do_long: #ifdef TARGET_X86_64 case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); if (is_right) { tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); } else { tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); } - tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); break; #endif default: if (is_right) { - tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_rotr_tl(s->T0, s->T0, cpu_T1); } else { - tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_rotl_tl(s->T0, s->T0, cpu_T1); } break; } @@ -1567,12 +1568,12 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) since we've computed the flags into CC_SRC, these variables are currently dead. */ if (is_right) { - tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1); - tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask); + tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1); + tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask); tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1); } else { - tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask); - tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1); + tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask); + tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1); } tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1); tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); @@ -1603,9 +1604,9 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, cpu_T0, op1); + gen_op_mov_v_reg(ot, s->T0, op1); } op2 &= mask; @@ -1613,20 +1614,20 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, switch (ot) { #ifdef TARGET_X86_64 case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); if (is_right) { tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2); } else { tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2); } - tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); break; #endif default: if (is_right) { - tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2); + tcg_gen_rotri_tl(s->T0, s->T0, op2); } else { - tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2); + tcg_gen_rotli_tl(s->T0, s->T0, op2); } break; case MO_8: @@ -1639,10 +1640,10 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, if (is_right) { shift = mask + 1 - shift; } - gen_extu(ot, cpu_T0); - tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift); - tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift); - tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0); + gen_extu(ot, s->T0); + tcg_gen_shli_tl(cpu_tmp0, s->T0, shift); + tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift); + tcg_gen_or_tl(s->T0, s->T0, cpu_tmp0); break; } } @@ -1659,12 +1660,12 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, since we've computed the flags into CC_SRC, these variables are currently dead. 
*/ if (is_right) { - tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1); - tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask); + tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1); + tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask); tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1); } else { - tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask); - tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1); + tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask); + tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1); } tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1); tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); @@ -1681,24 +1682,24 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* load */ if (op1 == OR_TMP0) - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); else - gen_op_mov_v_reg(ot, cpu_T0, op1); + gen_op_mov_v_reg(ot, s->T0, op1); if (is_right) { switch (ot) { case MO_8: - gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rcrb(s->T0, cpu_env, s->T0, cpu_T1); break; case MO_16: - gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rcrw(s->T0, cpu_env, s->T0, cpu_T1); break; case MO_32: - gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rcrl(s->T0, cpu_env, s->T0, cpu_T1); break; #ifdef TARGET_X86_64 case MO_64: - gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rcrq(s->T0, cpu_env, s->T0, cpu_T1); break; #endif default: @@ -1707,17 +1708,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, } else { switch (ot) { case MO_8: - gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rclb(s->T0, cpu_env, s->T0, cpu_T1); break; case MO_16: - gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rclw(s->T0, cpu_env, s->T0, cpu_T1); break; case MO_32: - gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rcll(s->T0, cpu_env, s->T0, cpu_T1); break; #ifdef TARGET_X86_64 case MO_64: - gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1); + gen_helper_rclq(s->T0, cpu_env, s->T0, cpu_T1); break; #endif default: @@ -1737,9 +1738,9 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* load */ if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, cpu_T0, op1); + gen_op_mov_v_reg(ot, s->T0, op1); } count = tcg_temp_new(); @@ -1751,11 +1752,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A portion by constructing it as a 32-bit value. */ if (is_right) { - tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16); - tcg_gen_mov_tl(cpu_T1, cpu_T0); - tcg_gen_mov_tl(cpu_T0, cpu_tmp0); + tcg_gen_deposit_tl(cpu_tmp0, s->T0, cpu_T1, 16, 16); + tcg_gen_mov_tl(cpu_T1, s->T0); + tcg_gen_mov_tl(s->T0, cpu_tmp0); } else { - tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16); + tcg_gen_deposit_tl(cpu_T1, s->T0, cpu_T1, 16, 16); } /* FALLTHRU */ #ifdef TARGET_X86_64 @@ -1763,28 +1764,28 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* Concatenate the two 32-bit values and use a 64-bit shift. 
*/ tcg_gen_subi_tl(cpu_tmp0, count, 1); if (is_right) { - tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1); - tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0); - tcg_gen_shr_i64(cpu_T0, cpu_T0, count); + tcg_gen_concat_tl_i64(s->T0, s->T0, cpu_T1); + tcg_gen_shr_i64(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shr_i64(s->T0, s->T0, count); } else { - tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0); - tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0); - tcg_gen_shl_i64(cpu_T0, cpu_T0, count); + tcg_gen_concat_tl_i64(s->T0, cpu_T1, s->T0); + tcg_gen_shl_i64(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shl_i64(s->T0, s->T0, count); tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32); - tcg_gen_shri_i64(cpu_T0, cpu_T0, 32); + tcg_gen_shri_i64(s->T0, s->T0, 32); } break; #endif default: tcg_gen_subi_tl(cpu_tmp0, count, 1); if (is_right) { - tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0); + tcg_gen_shr_tl(cpu_tmp0, s->T0, cpu_tmp0); tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count); - tcg_gen_shr_tl(cpu_T0, cpu_T0, count); + tcg_gen_shr_tl(s->T0, s->T0, count); tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4); } else { - tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0); + tcg_gen_shl_tl(cpu_tmp0, s->T0, cpu_tmp0); if (ot == MO_16) { /* Only needed if count > 16, for Intel behaviour. */ tcg_gen_subfi_tl(cpu_tmp4, 33, count); @@ -1793,20 +1794,20 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, } tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count); - tcg_gen_shl_tl(cpu_T0, cpu_T0, count); + tcg_gen_shl_tl(s->T0, s->T0, count); tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4); } tcg_gen_movi_tl(cpu_tmp4, 0); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4, cpu_tmp4, cpu_T1); - tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_or_tl(s->T0, s->T0, cpu_T1); break; } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); - gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right); + gen_shift_flags(s, ot, s->T0, cpu_tmp0, count, is_right); tcg_temp_free(count); } @@ -2126,23 +2127,23 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, if (mod == 3) { if (is_store) { if (reg != OR_TMP0) - gen_op_mov_v_reg(ot, cpu_T0, reg); - gen_op_mov_reg_v(ot, rm, cpu_T0); + gen_op_mov_v_reg(ot, s->T0, reg); + gen_op_mov_reg_v(ot, rm, s->T0); } else { - gen_op_mov_v_reg(ot, cpu_T0, rm); + gen_op_mov_v_reg(ot, s->T0, rm); if (reg != OR_TMP0) - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); } } else { gen_lea_modrm(env, s, modrm); if (is_store) { if (reg != OR_TMP0) - gen_op_mov_v_reg(ot, cpu_T0, reg); - gen_op_st_v(s, ot, cpu_T0, s->A0); + gen_op_mov_v_reg(ot, s->T0, reg); + gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); if (reg != OR_TMP0) - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); } } } @@ -2251,9 +2252,9 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, cc.reg2 = tcg_const_tl(cc.imm); } - tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2, - cpu_T0, cpu_regs[reg]); - gen_op_mov_reg_v(ot, reg, cpu_T0); + tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, + s->T0, cpu_regs[reg]); + gen_op_mov_reg_v(ot, reg, s->T0); if (cc.mask != -1) { tcg_temp_free(cc.reg); @@ -2263,18 +2264,18 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, } } -static inline void gen_op_movl_T0_seg(int seg_reg) +static inline void gen_op_movl_T0_seg(DisasContext *s, int seg_reg) { - tcg_gen_ld32u_tl(cpu_T0, cpu_env, + tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State,segs[seg_reg].selector)); } 
-static inline void gen_op_movl_seg_T0_vm(int seg_reg) +static inline void gen_op_movl_seg_T0_vm(DisasContext *s, int seg_reg) { - tcg_gen_ext16u_tl(cpu_T0, cpu_T0); - tcg_gen_st32_tl(cpu_T0, cpu_env, + tcg_gen_ext16u_tl(s->T0, s->T0); + tcg_gen_st32_tl(s->T0, cpu_env, offsetof(CPUX86State,segs[seg_reg].selector)); - tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4); + tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4); } /* move T0 to seg_reg and compute if the CPU state may change. Never @@ -2282,7 +2283,7 @@ static inline void gen_op_movl_seg_T0_vm(int seg_reg) static void gen_movl_seg_T0(DisasContext *s, int seg_reg) { if (s->pe && !s->vm86) { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. For R_SS, translation must always @@ -2292,7 +2293,7 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg) s->base.is_jmp = DISAS_TOO_MANY; } } else { - gen_op_movl_seg_T0_vm(seg_reg); + gen_op_movl_seg_T0_vm(s, seg_reg); if (seg_reg == R_SS) { s->base.is_jmp = DISAS_TOO_MANY; } @@ -2356,7 +2357,7 @@ static TCGMemOp gen_pop_T0(DisasContext *s) TCGMemOp d_ot = mo_pushpop(s, s->dflag); gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_T0, s->A0); + gen_op_ld_v(s, d_ot, s->T0, s->A0); return d_ot; } @@ -2401,8 +2402,8 @@ static void gen_popa(DisasContext *s) } tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size); gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_T0, s->A0); - gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0); + gen_op_ld_v(s, d_ot, s->T0, s->A0); + gen_op_mov_reg_v(d_ot, 7 - i, s->T0); } gen_stack_update(s, 8 * size); @@ -2454,11 +2455,11 @@ static void gen_leave(DisasContext *s) TCGMemOp a_ot = mo_stacksize(s); gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_T0, s->A0); + gen_op_ld_v(s, d_ot, s->T0, s->A0); tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot); - gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0); + gen_op_mov_reg_v(d_ot, R_EBP, s->T0); gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1); } @@ -3126,23 +3127,24 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); } else { - tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, + tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); - gen_op_st_v(s, MO_32, cpu_T0, s->A0); + gen_op_st_v(s, MO_32, s->T0, s->A0); } break; case 0x6e: /* movd mm, ea */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); - tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); + tcg_gen_st_tl(s->T0, cpu_env, + offsetof(CPUX86State, fpregs[reg].mmx)); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32); } break; @@ -3152,14 +3154,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); - gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0); + gen_helper_movq_mm_T0_xmm(cpu_ptr0, s->T0); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 
offsetof(CPUX86State,xmm_regs[reg])); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32); } break; @@ -3193,12 +3195,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, case 0x210: /* movss xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_32, cpu_T0, s->A0); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - tcg_gen_movi_tl(cpu_T0, 0); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1))); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2))); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3))); + gen_op_ld_v(s, MO_32, s->T0, s->A0); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); + tcg_gen_movi_tl(s->T0, 0); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1))); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)), @@ -3210,9 +3216,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0))); - tcg_gen_movi_tl(cpu_T0, 0); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2))); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3))); + tcg_gen_movi_tl(s->T0, 0); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)), @@ -3314,13 +3322,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, case 0x7e: /* movd ea, mm */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { - tcg_gen_ld_i64(cpu_T0, cpu_env, + tcg_gen_ld_i64(s->T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); } else #endif { - tcg_gen_ld32u_tl(cpu_T0, cpu_env, + tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); } @@ -3328,13 +3336,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, case 0x17e: /* movd ea, xmm */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { - tcg_gen_ld_i64(cpu_T0, cpu_env, + tcg_gen_ld_i64(s->T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); } else #endif { - tcg_gen_ld32u_tl(cpu_T0, cpu_env, + tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); } @@ -3379,8 +3387,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, case 0x211: /* movss ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); - tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - gen_op_st_v(s, MO_32, cpu_T0, s->A0); + tcg_gen_ld32u_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); + gen_op_st_v(s, MO_32, s->T0, s->A0); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)), @@ -3429,16 +3438,20 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } val = x86_ldub_code(env, 
s); if (is_xmm) { - tcg_gen_movi_tl(cpu_T0, val); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0))); - tcg_gen_movi_tl(cpu_T0, 0); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1))); + tcg_gen_movi_tl(s->T0, val); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_t0.ZMM_L(0))); + tcg_gen_movi_tl(s->T0, 0); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_t0.ZMM_L(1))); op1_offset = offsetof(CPUX86State,xmm_t0); } else { - tcg_gen_movi_tl(cpu_T0, val); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0))); - tcg_gen_movi_tl(cpu_T0, 0); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); + tcg_gen_movi_tl(s->T0, val); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, mmx_t0.MMX_L(0))); + tcg_gen_movi_tl(s->T0, 0); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, mmx_t0.MMX_L(1))); op1_offset = offsetof(CPUX86State,mmx_t0); } sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + @@ -3503,12 +3516,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); if (ot == MO_32) { SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; - sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0); + sse_fn_epl(cpu_env, cpu_ptr0, s->T0); #else goto illegal_op; #endif @@ -3555,8 +3568,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if ((b >> 8) & 1) { gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0))); } else { - gen_op_ld_v(s, MO_32, cpu_T0, s->A0); - tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0))); + gen_op_ld_v(s, MO_32, s->T0, s->A0); + tcg_gen_st32_tl(s->T0, cpu_env, + offsetof(CPUX86State, xmm_t0.ZMM_L(0))); } op2_offset = offsetof(CPUX86State,xmm_t0); } else { @@ -3568,17 +3582,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, SSEFunc_i_ep sse_fn_i_ep = sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0); - tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_l_ep sse_fn_l_ep = sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; - sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0); + sse_fn_l_ep(s->T0, cpu_env, cpu_ptr0); #else goto illegal_op; #endif } - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); break; case 0xc4: /* pinsrw */ case 0x1c4: @@ -3587,11 +3601,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, val = x86_ldub_code(env, s); if (b1) { val &= 7; - tcg_gen_st16_tl(cpu_T0, cpu_env, + tcg_gen_st16_tl(s->T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val))); } else { val &= 3; - tcg_gen_st16_tl(cpu_T0, cpu_env, + tcg_gen_st16_tl(s->T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); } break; @@ -3604,16 +3618,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (b1) { val &= 7; rm = (modrm & 7) | REX_B(s); - tcg_gen_ld16u_tl(cpu_T0, cpu_env, + tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val))); } else { val &= 3; rm = (modrm & 7); - tcg_gen_ld16u_tl(cpu_T0, cpu_env, + tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); } reg = ((modrm >> 3) & 7) | rex_r; - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, 
s->T0); break; case 0x1d6: /* movq ea, xmm */ if (mod != 3) { @@ -3760,11 +3774,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_helper_crc32(cpu_T0, cpu_tmp2_i32, - cpu_T0, tcg_const_i32(8 << ot)); + gen_helper_crc32(s->T0, cpu_tmp2_i32, + s->T0, tcg_const_i32(8 << ot)); ot = mo_64_32(s->dflag); - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); break; case 0x1f0: /* crc32 or movbe */ @@ -3789,9 +3803,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_lea_modrm(env, s, modrm); if ((b & 1) == 0) { - tcg_gen_qemu_ld_tl(cpu_T0, s->A0, + tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE); - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); } else { tcg_gen_qemu_st_tl(cpu_regs[reg], s->A0, s->mem_index, ot | MO_BE); @@ -3806,9 +3820,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_regs[s->vex_v]); - gen_op_mov_reg_v(ot, reg, cpu_T0); - gen_op_update1_cc(); + tcg_gen_andc_tl(s->T0, s->T0, cpu_regs[s->vex_v]); + gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -3826,12 +3840,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Extract START, and shift the operand. Shifts larger than operand size get zeros. */ tcg_gen_ext8u_tl(s->A0, cpu_regs[s->vex_v]); - tcg_gen_shr_tl(cpu_T0, cpu_T0, s->A0); + tcg_gen_shr_tl(s->T0, s->T0, s->A0); bound = tcg_const_tl(ot == MO_64 ? 63 : 31); zero = tcg_const_tl(0); - tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, s->A0, bound, - cpu_T0, zero); + tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, + s->T0, zero); tcg_temp_free(zero); /* Extract the LEN into a mask. 
Lengths larger than @@ -3843,10 +3857,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_movi_tl(cpu_T1, 1); tcg_gen_shl_tl(cpu_T1, cpu_T1, s->A0); tcg_gen_subi_tl(cpu_T1, cpu_T1, 1); - tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_and_tl(s->T0, s->T0, cpu_T1); - gen_op_mov_reg_v(ot, reg, cpu_T0); - gen_op_update1_cc(); + gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); } break; @@ -3872,9 +3886,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } tcg_gen_movi_tl(s->A0, -1); tcg_gen_shl_tl(s->A0, s->A0, cpu_T1); - tcg_gen_andc_tl(cpu_T0, cpu_T0, s->A0); - gen_op_mov_reg_v(ot, reg, cpu_T0); - gen_op_update1_cc(); + tcg_gen_andc_tl(s->T0, s->T0, s->A0); + gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_update1_cc(s); set_cc_op(s, CC_OP_BMILGB + ot); break; @@ -3888,7 +3902,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); switch (ot) { default: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]); tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); @@ -3897,9 +3911,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; #ifdef TARGET_X86_64 case MO_64: - tcg_gen_mulu2_i64(cpu_T0, cpu_T1, - cpu_T0, cpu_regs[R_EDX]); - tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0); + tcg_gen_mulu2_i64(s->T0, cpu_T1, + s->T0, cpu_regs[R_EDX]); + tcg_gen_mov_i64(cpu_regs[s->vex_v], s->T0); tcg_gen_mov_i64(cpu_regs[reg], cpu_T1); break; #endif @@ -3921,7 +3935,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } else { tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]); } - gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1); + gen_helper_pdep(cpu_regs[reg], s->T0, cpu_T1); break; case 0x2f5: /* pext Gy, By, Ey */ @@ -3939,7 +3953,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } else { tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]); } - gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1); + gen_helper_pext(cpu_regs[reg], s->T0, cpu_T1); break; case 0x1f6: /* adcx Gy, Ey */ @@ -3997,22 +4011,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* If we know TL is 64-bit, and we want a 32-bit result, just do everything in 64-bit arithmetic. */ tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]); - tcg_gen_ext32u_i64(cpu_T0, cpu_T0); - tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]); - tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in); - tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0); - tcg_gen_shri_i64(carry_out, cpu_T0, 32); + tcg_gen_ext32u_i64(s->T0, s->T0); + tcg_gen_add_i64(s->T0, s->T0, cpu_regs[reg]); + tcg_gen_add_i64(s->T0, s->T0, carry_in); + tcg_gen_ext32u_i64(cpu_regs[reg], s->T0); + tcg_gen_shri_i64(carry_out, s->T0, 32); break; #endif default: /* Otherwise compute the carry-out in two steps. 
*/ zero = tcg_const_tl(0); - tcg_gen_add2_tl(cpu_T0, carry_out, - cpu_T0, zero, + tcg_gen_add2_tl(s->T0, carry_out, + s->T0, zero, carry_in, zero); tcg_gen_add2_tl(cpu_regs[reg], carry_out, cpu_regs[reg], carry_out, - cpu_T0, zero); + s->T0, zero); tcg_temp_free(zero); break; } @@ -4036,19 +4050,19 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31); } if (b == 0x1f7) { - tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_shl_tl(s->T0, s->T0, cpu_T1); } else if (b == 0x2f7) { if (ot != MO_64) { - tcg_gen_ext32s_tl(cpu_T0, cpu_T0); + tcg_gen_ext32s_tl(s->T0, s->T0); } - tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_sar_tl(s->T0, s->T0, cpu_T1); } else { if (ot != MO_64) { - tcg_gen_ext32u_tl(cpu_T0, cpu_T0); + tcg_gen_ext32u_tl(s->T0, s->T0); } - tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_shr_tl(s->T0, s->T0, cpu_T1); } - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); break; case 0x0f3: @@ -4063,25 +4077,25 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_mov_tl(cpu_cc_src, cpu_T0); + tcg_gen_mov_tl(cpu_cc_src, s->T0); switch (reg & 7) { case 1: /* blsr By,Ey */ - tcg_gen_subi_tl(cpu_T1, cpu_T0, 1); - tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_subi_tl(cpu_T1, s->T0, 1); + tcg_gen_and_tl(s->T0, s->T0, cpu_T1); break; case 2: /* blsmsk By,Ey */ - tcg_gen_subi_tl(cpu_T1, cpu_T0, 1); - tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_subi_tl(cpu_T1, s->T0, 1); + tcg_gen_xor_tl(s->T0, s->T0, cpu_T1); break; case 3: /* blsi By, Ey */ - tcg_gen_neg_tl(cpu_T1, cpu_T0); - tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1); + tcg_gen_neg_tl(cpu_T1, s->T0); + tcg_gen_and_tl(s->T0, s->T0, cpu_T1); break; default: goto unknown_op; } - tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); - gen_op_mov_reg_v(ot, s->vex_v, cpu_T0); + tcg_gen_mov_tl(cpu_cc_dst, s->T0); + gen_op_mov_reg_v(ot, s->vex_v, s->T0); set_cc_op(s, CC_OP_BMILGB + ot); break; @@ -4119,22 +4133,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, val = x86_ldub_code(env, s); switch (b) { case 0x14: /* pextrb */ - tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, + tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_B(val & 15))); if (mod == 3) { - gen_op_mov_reg_v(ot, rm, cpu_T0); + gen_op_mov_reg_v(ot, rm, s->T0); } else { - tcg_gen_qemu_st_tl(cpu_T0, s->A0, + tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, MO_UB); } break; case 0x15: /* pextrw */ - tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, + tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_W(val & 7))); if (mod == 3) { - gen_op_mov_reg_v(ot, rm, cpu_T0); + gen_op_mov_reg_v(ot, rm, s->T0); } else { - tcg_gen_qemu_st_tl(cpu_T0, s->A0, + tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, MO_LEUW); } break; @@ -4166,23 +4180,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } break; case 0x17: /* extractps */ - tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, + tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); if (mod == 3) { - gen_op_mov_reg_v(ot, rm, cpu_T0); + gen_op_mov_reg_v(ot, rm, s->T0); } else { - tcg_gen_qemu_st_tl(cpu_T0, s->A0, + tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, MO_LEUL); } break; case 0x20: /* pinsrb */ if (mod == 3) { - gen_op_mov_v_reg(MO_32, cpu_T0, rm); + gen_op_mov_v_reg(MO_32, s->T0, rm); } else { - tcg_gen_qemu_ld_tl(cpu_T0, s->A0, + 
tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, MO_UB); } - tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State, + tcg_gen_st8_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_B(val & 15))); break; case 0x21: /* insertps */ @@ -4297,13 +4311,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); b = x86_ldub_code(env, s); if (ot == MO_64) { - tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63); + tcg_gen_rotri_tl(s->T0, s->T0, b & 63); } else { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); + tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31); - tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); } - gen_op_mov_reg_v(ot, reg, cpu_T0); + gen_op_mov_reg_v(ot, reg, s->T0); break; default: @@ -4360,8 +4374,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, switch (sz) { case 2: /* 32 bit access */ - gen_op_ld_v(s, MO_32, cpu_T0, s->A0); - tcg_gen_st32_tl(cpu_T0, cpu_env, + gen_op_ld_v(s, MO_32, s->T0, s->A0); + tcg_gen_st32_tl(s->T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0))); break; case 3: @@ -4657,8 +4671,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) xor_zero: /* xor reg, reg optimisation */ set_cc_op(s, CC_OP_CLR); - tcg_gen_movi_tl(cpu_T0, 0); - gen_op_mov_reg_v(ot, reg, cpu_T0); + tcg_gen_movi_tl(s->T0, 0); + gen_op_mov_reg_v(ot, reg, s->T0); break; } else { opreg = rm; @@ -4760,17 +4774,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* For those below that handle locked memory, don't load here. */ if (!(s->prefix & PREFIX_LOCK) || op != 2) { - gen_op_ld_v(s, ot, cpu_T0, s->A0); + gen_op_ld_v(s, ot, s->T0, s->A0); } } else { - gen_op_mov_v_reg(ot, cpu_T0, rm); + gen_op_mov_v_reg(ot, s->T0, rm); } switch(op) { case 0: /* test */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); - gen_op_testl_T0_T1_cc(); + gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; case 2: /* not */ @@ -4778,15 +4792,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) { goto illegal_op; } - tcg_gen_movi_tl(cpu_T0, ~0); - tcg_gen_atomic_xor_fetch_tl(cpu_T0, s->A0, cpu_T0, + tcg_gen_movi_tl(s->T0, ~0); + tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0, s->mem_index, ot | MO_LE); } else { - tcg_gen_not_tl(cpu_T0, cpu_T0); + tcg_gen_not_tl(s->T0, s->T0); if (mod != 3) { - gen_op_st_v(s, ot, cpu_T0, s->A0); + gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_reg_v(ot, rm, cpu_T0); + gen_op_mov_reg_v(ot, rm, s->T0); } } break; @@ -4803,7 +4817,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) label1 = gen_new_label(); tcg_gen_mov_tl(a0, s->A0); - tcg_gen_mov_tl(t0, cpu_T0); + tcg_gen_mov_tl(t0, s->T0); gen_set_label(label1); t1 = tcg_temp_new(); @@ -4817,14 +4831,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_temp_free(t2); tcg_temp_free(a0); - tcg_gen_mov_tl(cpu_T0, t0); + tcg_gen_mov_tl(s->T0, t0); tcg_temp_free(t0); } else { - tcg_gen_neg_tl(cpu_T0, cpu_T0); + tcg_gen_neg_tl(s->T0, s->T0); if (mod != 3) { - gen_op_st_v(s, ot, cpu_T0, s->A0); + gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_reg_v(ot, rm, cpu_T0); + gen_op_mov_reg_v(ot, rm, s->T0); } } gen_op_update_neg_cc(s); @@ -4834,31 +4848,31 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); - tcg_gen_ext8u_tl(cpu_T0, cpu_T0); + tcg_gen_ext8u_tl(s->T0, s->T0); 
                     tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
                     /* XXX: use 32 bit mul which could be faster */
-                    tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
-                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
-                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
-                    tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
+                    tcg_gen_mul_tl(s->T0, s->T0, cpu_T1);
+                    gen_op_mov_reg_v(MO_16, R_EAX, s->T0);
+                    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+                    tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                     set_cc_op(s, CC_OP_MULB);
                     break;
                 case MO_16:
                     gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
-                    tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+                    tcg_gen_ext16u_tl(s->T0, s->T0);
                     tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
                     /* XXX: use 32 bit mul which could be faster */
-                    tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
-                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
-                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
-                    tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
-                    gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
-                    tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+                    tcg_gen_mul_tl(s->T0, s->T0, cpu_T1);
+                    gen_op_mov_reg_v(MO_16, R_EAX, s->T0);
+                    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+                    tcg_gen_shri_tl(s->T0, s->T0, 16);
+                    gen_op_mov_reg_v(MO_16, R_EDX, s->T0);
+                    tcg_gen_mov_tl(cpu_cc_src, s->T0);
                     set_cc_op(s, CC_OP_MULW);
                     break;
                 default:
                 case MO_32:
-                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                     tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                     tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                       cpu_tmp2_i32, cpu_tmp3_i32);
@@ -4871,7 +4885,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 #ifdef TARGET_X86_64
                 case MO_64:
                     tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
-                                      cpu_T0, cpu_regs[R_EAX]);
+                                      s->T0, cpu_regs[R_EAX]);
                     tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                     tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                     set_cc_op(s, CC_OP_MULQ);
@@ -4883,33 +4897,33 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 switch(ot) {
                 case MO_8:
                     gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
-                    tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+                    tcg_gen_ext8s_tl(s->T0, s->T0);
                     tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
                     /* XXX: use 32 bit mul which could be faster */
-                    tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
-                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
-                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
-                    tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
-                    tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+                    tcg_gen_mul_tl(s->T0, s->T0, cpu_T1);
+                    gen_op_mov_reg_v(MO_16, R_EAX, s->T0);
+                    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+                    tcg_gen_ext8s_tl(cpu_tmp0, s->T0);
+                    tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0);
                     set_cc_op(s, CC_OP_MULB);
                     break;
                 case MO_16:
                     gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
-                    tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+                    tcg_gen_ext16s_tl(s->T0, s->T0);
                     tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
                     /* XXX: use 32 bit mul which could be faster */
-                    tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
-                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
-                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
-                    tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
-                    tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
-                    tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
-                    gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+                    tcg_gen_mul_tl(s->T0, s->T0, cpu_T1);
+                    gen_op_mov_reg_v(MO_16, R_EAX, s->T0);
+                    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+                    tcg_gen_ext16s_tl(cpu_tmp0, s->T0);
+                    tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0);
+                    tcg_gen_shri_tl(s->T0, s->T0, 16);
+                    gen_op_mov_reg_v(MO_16, R_EDX, s->T0);
                     set_cc_op(s, CC_OP_MULW);
                     break;
                 default:
                 case MO_32:
-                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                     tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                     tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                       cpu_tmp2_i32, cpu_tmp3_i32);
@@ -4924,7 +4938,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 #ifdef TARGET_X86_64
                 case MO_64:
                     tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
-                                      cpu_T0, cpu_regs[R_EAX]);
+                                      s->T0, cpu_regs[R_EAX]);
                     tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                     tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                     tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
@@ -4936,18 +4950,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             case 6: /* div */
                 switch(ot) {
                 case MO_8:
-                    gen_helper_divb_AL(cpu_env, cpu_T0);
+                    gen_helper_divb_AL(cpu_env, s->T0);
                     break;
                 case MO_16:
-                    gen_helper_divw_AX(cpu_env, cpu_T0);
+                    gen_helper_divw_AX(cpu_env, s->T0);
                     break;
                 default:
                 case MO_32:
-                    gen_helper_divl_EAX(cpu_env, cpu_T0);
+                    gen_helper_divl_EAX(cpu_env, s->T0);
                     break;
 #ifdef TARGET_X86_64
                 case MO_64:
-                    gen_helper_divq_EAX(cpu_env, cpu_T0);
+                    gen_helper_divq_EAX(cpu_env, s->T0);
                     break;
 #endif
                 }
@@ -4955,18 +4969,18 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             case 7: /* idiv */
                 switch(ot) {
                 case MO_8:
-                    gen_helper_idivb_AL(cpu_env, cpu_T0);
+                    gen_helper_idivb_AL(cpu_env, s->T0);
                     break;
                 case MO_16:
-                    gen_helper_idivw_AX(cpu_env, cpu_T0);
+                    gen_helper_idivw_AX(cpu_env, s->T0);
                     break;
                 default:
                 case MO_32:
-                    gen_helper_idivl_EAX(cpu_env, cpu_T0);
+                    gen_helper_idivl_EAX(cpu_env, s->T0);
                     break;
 #ifdef TARGET_X86_64
                 case MO_64:
-                    gen_helper_idivq_EAX(cpu_env, cpu_T0);
+                    gen_helper_idivq_EAX(cpu_env, s->T0);
                     break;
 #endif
                 }
@@ -5001,9 +5015,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
             if (op >= 2 && op != 3 && op != 5)
-                gen_op_ld_v(s, ot, cpu_T0, s->A0);
+                gen_op_ld_v(s, ot, s->T0, s->A0);
         } else {
-            gen_op_mov_v_reg(ot, cpu_T0, rm);
+            gen_op_mov_v_reg(ot, s->T0, rm);
         }

         switch(op) {
@@ -5024,27 +5038,27 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
             if (dflag == MO_16) {
-                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+                tcg_gen_ext16u_tl(s->T0, s->T0);
             }
             next_eip = s->pc - s->cs_base;
             tcg_gen_movi_tl(cpu_T1, next_eip);
             gen_push_v(s, cpu_T1);
-            gen_op_jmp_v(cpu_T0);
+            gen_op_jmp_v(s->T0);
             gen_bnd_jmp(s);
-            gen_jr(s, cpu_T0);
+            gen_jr(s, s->T0);
             break;
         case 3: /* lcall Ev */
             gen_op_ld_v(s, ot, cpu_T1, s->A0);
             gen_add_A0_im(s, 1 << ot);
-            gen_op_ld_v(s, MO_16, cpu_T0, s->A0);
+            gen_op_ld_v(s, MO_16, s->T0, s->A0);
         do_lcall:
             if (s->pe && !s->vm86) {
-                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                            tcg_const_i32(dflag - 1),
                                            tcg_const_tl(s->pc - s->cs_base));
             } else {
-                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
                                       tcg_const_i32(dflag - 1),
                                       tcg_const_i32(s->pc - s->cs_base));
@@ -5054,30 +5068,30 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
         case 4: /* jmp Ev */
             if (dflag == MO_16) {
-                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+                tcg_gen_ext16u_tl(s->T0, s->T0);
             }
-            gen_op_jmp_v(cpu_T0);
+            gen_op_jmp_v(s->T0);
             gen_bnd_jmp(s);
-            gen_jr(s, cpu_T0);
+            gen_jr(s, s->T0);
             break;
         case 5: /* ljmp Ev */
             gen_op_ld_v(s, ot, cpu_T1, s->A0);
             gen_add_A0_im(s, 1 << ot);
-            gen_op_ld_v(s, MO_16, cpu_T0, s->A0);
+            gen_op_ld_v(s, MO_16, s->T0, s->A0);
         do_ljmp:
             if (s->pe && !s->vm86) {
-                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                           tcg_const_tl(s->pc - s->cs_base));
             } else {
-                gen_op_movl_seg_T0_vm(R_CS);
+                gen_op_movl_seg_T0_vm(s, R_CS);
                 gen_op_jmp_v(cpu_T1);
             }
             tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
             gen_jr(s, cpu_tmp4);
             break;
         case 6: /* push Ev */
-            gen_push_v(s, cpu_T0);
+            gen_push_v(s, s->T0);
             break;
         default:
             goto unknown_op;
@@ -5093,7 +5107,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)

         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
         gen_op_mov_v_reg(ot, cpu_T1, reg);
-        gen_op_testl_T0_T1_cc();
+        gen_op_testl_T0_T1_cc(s);
         set_cc_op(s, CC_OP_LOGICB + ot);
         break;

@@ -5102,9 +5116,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         ot = mo_b_d(b, dflag);
         val = insn_get(env, s, ot);

-        gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
+        gen_op_mov_v_reg(ot, s->T0, OR_EAX);
         tcg_gen_movi_tl(cpu_T1, val);
-        gen_op_testl_T0_T1_cc();
+        gen_op_testl_T0_T1_cc(s);
         set_cc_op(s, CC_OP_LOGICB + ot);
         break;

@@ -5112,20 +5126,20 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         switch (dflag) {
 #ifdef TARGET_X86_64
         case MO_64:
-            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
-            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
-            gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
+            gen_op_mov_v_reg(MO_32, s->T0, R_EAX);
+            tcg_gen_ext32s_tl(s->T0, s->T0);
+            gen_op_mov_reg_v(MO_64, R_EAX, s->T0);
             break;
 #endif
         case MO_32:
-            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
-            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
-            gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
+            gen_op_mov_v_reg(MO_16, s->T0, R_EAX);
+            tcg_gen_ext16s_tl(s->T0, s->T0);
+            gen_op_mov_reg_v(MO_32, R_EAX, s->T0);
             break;
         case MO_16:
-            gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
-            tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
-            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+            gen_op_mov_v_reg(MO_8, s->T0, R_EAX);
+            tcg_gen_ext8s_tl(s->T0, s->T0);
+            gen_op_mov_reg_v(MO_16, R_EAX, s->T0);
             break;
         default:
             tcg_abort();
@@ -5135,22 +5149,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         switch (dflag) {
 #ifdef TARGET_X86_64
         case MO_64:
-            gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
-            tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
-            gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
+            gen_op_mov_v_reg(MO_64, s->T0, R_EAX);
+            tcg_gen_sari_tl(s->T0, s->T0, 63);
+            gen_op_mov_reg_v(MO_64, R_EDX, s->T0);
             break;
 #endif
         case MO_32:
-            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
-            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
-            tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
-            gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
+            gen_op_mov_v_reg(MO_32, s->T0, R_EAX);
+            tcg_gen_ext32s_tl(s->T0, s->T0);
+            tcg_gen_sari_tl(s->T0, s->T0, 31);
+            gen_op_mov_reg_v(MO_32, R_EDX, s->T0);
             break;
         case MO_16:
-            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
-            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
-            tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
-            gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+            gen_op_mov_v_reg(MO_16, s->T0, R_EAX);
+            tcg_gen_ext16s_tl(s->T0, s->T0);
+            tcg_gen_sari_tl(s->T0, s->T0, 15);
+            gen_op_mov_reg_v(MO_16, R_EDX, s->T0);
             break;
         default:
             tcg_abort();
@@ -5179,14 +5193,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         switch (ot) {
 #ifdef TARGET_X86_64
         case MO_64:
-            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
+            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, s->T0, cpu_T1);
             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
             break;
 #endif
         case MO_32:
-            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
             tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
             tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                               cpu_tmp2_i32,
                               cpu_tmp3_i32);
@@ -5197,14 +5211,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
             break;
         default:
-            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+            tcg_gen_ext16s_tl(s->T0, s->T0);
             tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
             /* XXX: use 32 bit mul which could be faster */
-            tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
-            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
-            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
-            tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
-            gen_op_mov_reg_v(ot, reg, cpu_T0);
+            tcg_gen_mul_tl(s->T0, s->T0, cpu_T1);
+            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+            tcg_gen_ext16s_tl(cpu_tmp0, s->T0);
+            tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0);
+            gen_op_mov_reg_v(ot, reg, s->T0);
             break;
         }
         set_cc_op(s, CC_OP_MULB + ot);
@@ -5215,27 +5229,27 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         modrm = x86_ldub_code(env, s);
         reg = ((modrm >> 3) & 7) | rex_r;
         mod = (modrm >> 6) & 3;
-        gen_op_mov_v_reg(ot, cpu_T0, reg);
+        gen_op_mov_v_reg(ot, s->T0, reg);
         if (mod == 3) {
             rm = (modrm & 7) | REX_B(s);
             gen_op_mov_v_reg(ot, cpu_T1, rm);
-            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+            tcg_gen_add_tl(s->T0, s->T0, cpu_T1);
             gen_op_mov_reg_v(ot, reg, cpu_T1);
-            gen_op_mov_reg_v(ot, rm, cpu_T0);
+            gen_op_mov_reg_v(ot, rm, s->T0);
         } else {
             gen_lea_modrm(env, s, modrm);
             if (s->prefix & PREFIX_LOCK) {
-                tcg_gen_atomic_fetch_add_tl(cpu_T1, s->A0, cpu_T0,
+                tcg_gen_atomic_fetch_add_tl(cpu_T1, s->A0, s->T0,
                                             s->mem_index, ot | MO_LE);
-                tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+                tcg_gen_add_tl(s->T0, s->T0, cpu_T1);
             } else {
                 gen_op_ld_v(s, ot, cpu_T1, s->A0);
-                tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
-                gen_op_st_v(s, ot, cpu_T0, s->A0);
+                tcg_gen_add_tl(s->T0, s->T0, cpu_T1);
+                gen_op_st_v(s, ot, s->T0, s->A0);
             }
             gen_op_mov_reg_v(ot, reg, cpu_T1);
         }
-        gen_op_update2_cc();
+        gen_op_update2_cc(s);
         set_cc_op(s, CC_OP_ADDB + ot);
         break;
     case 0x1b0:
@@ -5328,14 +5342,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         /**************************/
         /* push/pop */
     case 0x50 ... 0x57: /* push */
-        gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
-        gen_push_v(s, cpu_T0);
+        gen_op_mov_v_reg(MO_32, s->T0, (b & 7) | REX_B(s));
+        gen_push_v(s, s->T0);
         break;
     case 0x58 ... 0x5f: /* pop */
         ot = gen_pop_T0(s);
         /* NOTE: order is important for pop %sp */
         gen_pop_update(s, ot);
-        gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
+        gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), s->T0);
         break;
     case 0x60: /* pusha */
         if (CODE64(s))
@@ -5354,8 +5368,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             val = insn_get(env, s, ot);
         else
             val = (int8_t)insn_get(env, s, MO_8);
-        tcg_gen_movi_tl(cpu_T0, val);
-        gen_push_v(s, cpu_T0);
+        tcg_gen_movi_tl(s->T0, val);
+        gen_push_v(s, s->T0);
         break;
     case 0x8f: /* pop Ev */
         modrm = x86_ldub_code(env, s);
@@ -5365,7 +5379,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             /* NOTE: order is important for pop %sp */
             gen_pop_update(s, ot);
             rm = (modrm & 7) | REX_B(s);
-            gen_op_mov_reg_v(ot, rm, cpu_T0);
+            gen_op_mov_reg_v(ot, rm, s->T0);
         } else {
             /* NOTE: order is important too for MMU exceptions */
             s->popl_esp_hack = 1 << ot;
@@ -5391,13 +5405,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0x1e: /* push ds */
         if (CODE64(s))
             goto illegal_op;
-        gen_op_movl_T0_seg(b >> 3);
-        gen_push_v(s, cpu_T0);
+        gen_op_movl_T0_seg(s, b >> 3);
+        gen_push_v(s, s->T0);
         break;
     case 0x1a0: /* push fs */
     case 0x1a8: /* push gs */
-        gen_op_movl_T0_seg((b >> 3) & 7);
-        gen_push_v(s, cpu_T0);
+        gen_op_movl_T0_seg(s, (b >> 3) & 7);
+        gen_push_v(s, s->T0);
         break;
     case 0x07: /* pop es */
     case 0x17: /* pop ss */
@@ -5451,11 +5465,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_lea_modrm(env, s, modrm);
         }
         val = insn_get(env, s, ot);
-        tcg_gen_movi_tl(cpu_T0, val);
+        tcg_gen_movi_tl(s->T0, val);
         if (mod != 3) {
-            gen_op_st_v(s, ot, cpu_T0, s->A0);
+            gen_op_st_v(s, ot, s->T0, s->A0);
         } else {
-            gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
+            gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), s->T0);
         }
         break;
     case 0x8a:
@@ -5465,7 +5479,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         reg = ((modrm >> 3) & 7) | rex_r;

         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
-        gen_op_mov_reg_v(ot, reg, cpu_T0);
+        gen_op_mov_reg_v(ot, reg, s->T0);
         break;
     case 0x8e: /* mov seg, Gv */
         modrm = x86_ldub_code(env, s);
@@ -5491,7 +5505,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         mod = (modrm >> 6) & 3;
         if (reg >= 6)
             goto illegal_op;
-        gen_op_movl_T0_seg(reg);
+        gen_op_movl_T0_seg(s, reg);
         ot = mod == 3 ? dflag : MO_16;
         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
         break;
@@ -5518,30 +5532,30 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)

             if (mod == 3) {
                 if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
-                    tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
+                    tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                 } else {
-                    gen_op_mov_v_reg(ot, cpu_T0, rm);
+                    gen_op_mov_v_reg(ot, s->T0, rm);
                     switch (s_ot) {
                     case MO_UB:
-                        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+                        tcg_gen_ext8u_tl(s->T0, s->T0);
                         break;
                     case MO_SB:
-                        tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+                        tcg_gen_ext8s_tl(s->T0, s->T0);
                         break;
                     case MO_UW:
-                        tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+                        tcg_gen_ext16u_tl(s->T0, s->T0);
                         break;
                     default:
                     case MO_SW:
-                        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+                        tcg_gen_ext16s_tl(s->T0, s->T0);
                         break;
                     }
                 }
-                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+                gen_op_mov_reg_v(d_ot, reg, s->T0);
             } else {
                 gen_lea_modrm(env, s, modrm);
-                gen_op_ld_v(s, s_ot, cpu_T0, s->A0);
-                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+                gen_op_ld_v(s, s_ot, s->T0, s->A0);
+                gen_op_mov_reg_v(d_ot, reg, s->T0);
             }
         }
         break;
@@ -5581,27 +5595,27 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             tcg_gen_movi_tl(s->A0, offset_addr);
             gen_add_A0_ds_seg(s);
             if ((b & 2) == 0) {
-                gen_op_ld_v(s, ot, cpu_T0, s->A0);
-                gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
+                gen_op_ld_v(s, ot, s->T0, s->A0);
+                gen_op_mov_reg_v(ot, R_EAX, s->T0);
             } else {
-                gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
-                gen_op_st_v(s, ot, cpu_T0, s->A0);
+                gen_op_mov_v_reg(ot, s->T0, R_EAX);
+                gen_op_st_v(s, ot, s->T0, s->A0);
             }
         }
         break;
     case 0xd7: /* xlat */
         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
-        tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
-        tcg_gen_add_tl(s->A0, s->A0, cpu_T0);
+        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
+        tcg_gen_add_tl(s->A0, s->A0, s->T0);
         gen_extu(s->aflag, s->A0);
         gen_add_A0_ds_seg(s);
-        gen_op_ld_v(s, MO_8, cpu_T0, s->A0);
-        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
+        gen_op_ld_v(s, MO_8, s->T0, s->A0);
+        gen_op_mov_reg_v(MO_8, R_EAX, s->T0);
         break;
     case 0xb0 ... 0xb7: /* mov R, Ib */
         val = insn_get(env, s, MO_8);
-        tcg_gen_movi_tl(cpu_T0, val);
-        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
+        tcg_gen_movi_tl(s->T0, val);
+        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), s->T0);
         break;
     case 0xb8 ... 0xbf: /* mov R, Iv */
 #ifdef TARGET_X86_64
         if (dflag == MO_64) {
             /* 64 bit case */
             tmp = x86_ldq_code(env, s);
             reg = (b & 7) | REX_B(s);
-            tcg_gen_movi_tl(cpu_T0, tmp);
-            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
+            tcg_gen_movi_tl(s->T0, tmp);
+            gen_op_mov_reg_v(MO_64, reg, s->T0);
         } else
 #endif
         {
             ot = dflag;
             val = insn_get(env, s, ot);
             reg = (b & 7) | REX_B(s);
-            tcg_gen_movi_tl(cpu_T0, val);
-            gen_op_mov_reg_v(ot, reg, cpu_T0);
+            tcg_gen_movi_tl(s->T0, val);
+            gen_op_mov_reg_v(ot, reg, s->T0);
         }
         break;

@@ -5638,15 +5652,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         if (mod == 3) {
             rm = (modrm & 7) | REX_B(s);
         do_xchg_reg:
-            gen_op_mov_v_reg(ot, cpu_T0, reg);
+            gen_op_mov_v_reg(ot, s->T0, reg);
             gen_op_mov_v_reg(ot, cpu_T1, rm);
-            gen_op_mov_reg_v(ot, rm, cpu_T0);
+            gen_op_mov_reg_v(ot, rm, s->T0);
             gen_op_mov_reg_v(ot, reg, cpu_T1);
         } else {
             gen_lea_modrm(env, s, modrm);
-            gen_op_mov_v_reg(ot, cpu_T0, reg);
+            gen_op_mov_v_reg(ot, s->T0, reg);
             /* for xchg, lock is implicit */
-            tcg_gen_atomic_xchg_tl(cpu_T1, s->A0, cpu_T0,
+            tcg_gen_atomic_xchg_tl(cpu_T1, s->A0, s->T0,
                                    s->mem_index, ot | MO_LE);
             gen_op_mov_reg_v(ot, reg, cpu_T1);
         }
@@ -5678,7 +5692,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         gen_op_ld_v(s, ot, cpu_T1, s->A0);
         gen_add_A0_im(s, 1 << ot);
         /* load the segment first to handle exceptions properly */
-        gen_op_ld_v(s, MO_16, cpu_T0, s->A0);
+        gen_op_ld_v(s, MO_16, s->T0, s->A0);
         gen_movl_seg_T0(s, op);
         /* then put the data */
         gen_op_mov_reg_v(ot, reg, cpu_T1);
@@ -6220,8 +6234,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 switch(rm) {
                 case 0:
                     gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
-                    tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
-                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+                    tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32);
+                    gen_op_mov_reg_v(MO_16, R_EAX, s->T0);
                     break;
                 default:
                     goto unknown_op;
@@ -6331,7 +6345,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0x6c: /* insS */
     case 0x6d:
         ot = mo_b_d32(b, dflag);
-        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
@@ -6346,7 +6360,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0x6e: /* outsS */
     case 0x6f:
         ot = mo_b_d32(b, dflag);
-        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      svm_is_rep(prefixes) | 4);
         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
@@ -6366,7 +6380,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0xe5:
         ot = mo_b_d32(b, dflag);
         val = x86_ldub_code(env, s);
-        tcg_gen_movi_tl(cpu_T0, val);
+        tcg_gen_movi_tl(s->T0, val);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
         if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
@@ -6385,7 +6399,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0xe7:
         ot = mo_b_d32(b, dflag);
         val = x86_ldub_code(env, s);
-        tcg_gen_movi_tl(cpu_T0, val);
+        tcg_gen_movi_tl(s->T0, val);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      svm_is_rep(prefixes));
         gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
@@ -6405,13 +6419,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0xec:
     case 0xed:
         ot = mo_b_d32(b, dflag);
-        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
         if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_start();
         }
-        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
         gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
         gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
         gen_bpt_io(s, cpu_tmp2_i32, ot);
@@ -6423,7 +6437,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0xee:
     case 0xef:
         ot = mo_b_d32(b, dflag);
-        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+        tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
         gen_check_io(s, ot, pc_start - s->cs_base,
                      svm_is_rep(prefixes));
         gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
@@ -6431,7 +6445,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
             gen_io_start();
         }
-        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
         tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
         gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
         gen_bpt_io(s, cpu_tmp2_i32, ot);
@@ -6448,17 +6462,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         ot = gen_pop_T0(s);
         gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
-        gen_op_jmp_v(cpu_T0);
+        gen_op_jmp_v(s->T0);
         gen_bnd_jmp(s);
-        gen_jr(s, cpu_T0);
+        gen_jr(s, s->T0);
         break;
     case 0xc3: /* ret */
         ot = gen_pop_T0(s);
         gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
-        gen_op_jmp_v(cpu_T0);
+        gen_op_jmp_v(s->T0);
         gen_bnd_jmp(s);
-        gen_jr(s, cpu_T0);
+        gen_jr(s, s->T0);
         break;
     case 0xca: /* lret im */
         val = x86_ldsw_code(env, s);
@@ -6471,14 +6485,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         } else {
             gen_stack_A0(s);
             /* pop offset */
-            gen_op_ld_v(s, dflag, cpu_T0, s->A0);
+            gen_op_ld_v(s, dflag, s->T0, s->A0);
             /* NOTE: keeping EIP updated is not a problem in case of
                exception */
-            gen_op_jmp_v(cpu_T0);
+            gen_op_jmp_v(s->T0);
             /* pop selector */
             gen_add_A0_im(s, 1 << dflag);
-            gen_op_ld_v(s, dflag, cpu_T0, s->A0);
-            gen_op_movl_seg_T0_vm(R_CS);
+            gen_op_ld_v(s, dflag, s->T0, s->A0);
+            gen_op_movl_seg_T0_vm(s, R_CS);
             /* add stack offset */
             gen_stack_update(s, val + (2 << dflag));
         }
@@ -6521,8 +6535,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             } else if (!CODE64(s)) {
                 tval &= 0xffffffff;
             }
-            tcg_gen_movi_tl(cpu_T0, next_eip);
-            gen_push_v(s, cpu_T0);
+            tcg_gen_movi_tl(s->T0, next_eip);
+            gen_push_v(s, s->T0);
             gen_bnd_jmp(s);
             gen_jmp(s, tval);
         }
@@ -6537,7 +6551,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             offset = insn_get(env, s, ot);
             selector = insn_get(env, s, MO_16);

-            tcg_gen_movi_tl(cpu_T0, selector);
+            tcg_gen_movi_tl(s->T0, selector);
             tcg_gen_movi_tl(cpu_T1, offset);
         }
         goto do_lcall;
@@ -6566,7 +6580,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             offset = insn_get(env, s, ot);
             selector = insn_get(env, s, MO_16);

-            tcg_gen_movi_tl(cpu_T0, selector);
+            tcg_gen_movi_tl(s->T0, selector);
             tcg_gen_movi_tl(cpu_T1, offset);
         }
         goto do_ljmp;
@@ -6599,7 +6613,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)

     case 0x190 ... 0x19f: /* setcc Gv */
         modrm = x86_ldub_code(env, s);
-        gen_setcc1(s, b, cpu_T0);
+        gen_setcc1(s, b, s->T0);
         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
         break;
     case 0x140 ... 0x14f: /* cmov Gv, Ev */
@@ -6620,8 +6634,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
             gen_update_cc_op(s);
-            gen_helper_read_eflags(cpu_T0, cpu_env);
-            gen_push_v(s, cpu_T0);
+            gen_helper_read_eflags(s->T0, cpu_env);
+            gen_push_v(s, s->T0);
         }
         break;
     case 0x9d: /* popf */
@@ -6632,13 +6646,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             ot = gen_pop_T0(s);
             if (s->cpl == 0) {
                 if (dflag != MO_16) {
-                    gen_helper_write_eflags(cpu_env, cpu_T0,
+                    gen_helper_write_eflags(cpu_env, s->T0,
                                             tcg_const_i32((TF_MASK | AC_MASK |
                                                            ID_MASK | NT_MASK |
                                                            IF_MASK |
                                                            IOPL_MASK)));
                 } else {
-                    gen_helper_write_eflags(cpu_env, cpu_T0,
+                    gen_helper_write_eflags(cpu_env, s->T0,
                                             tcg_const_i32((TF_MASK | AC_MASK |
                                                            ID_MASK | NT_MASK |
                                                            IF_MASK | IOPL_MASK)
@@ -6647,14 +6661,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             } else {
                 if (s->cpl <= s->iopl) {
                     if (dflag != MO_16) {
-                        gen_helper_write_eflags(cpu_env, cpu_T0,
+                        gen_helper_write_eflags(cpu_env, s->T0,
                                                 tcg_const_i32((TF_MASK |
                                                                AC_MASK |
                                                                ID_MASK |
                                                                NT_MASK |
                                                                IF_MASK)));
                     } else {
-                        gen_helper_write_eflags(cpu_env, cpu_T0,
+                        gen_helper_write_eflags(cpu_env, s->T0,
                                                 tcg_const_i32((TF_MASK |
                                                                AC_MASK |
                                                                ID_MASK |
@@ -6664,11 +6678,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 }
             } else {
                 if (dflag != MO_16) {
-                    gen_helper_write_eflags(cpu_env, cpu_T0,
+                    gen_helper_write_eflags(cpu_env, s->T0,
                                             tcg_const_i32((TF_MASK | AC_MASK |
                                                            ID_MASK | NT_MASK)));
                 } else {
-                    gen_helper_write_eflags(cpu_env, cpu_T0,
+                    gen_helper_write_eflags(cpu_env, s->T0,
                                             tcg_const_i32((TF_MASK | AC_MASK |
                                                            ID_MASK | NT_MASK)
                                                           & 0xffff));
@@ -6685,19 +6699,19 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0x9e: /* sahf */
         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
             goto illegal_op;
-        gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
+        gen_op_mov_v_reg(MO_8, s->T0, R_AH);
         gen_compute_eflags(s);
         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
-        tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
-        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
+        tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
+        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
         break;
     case 0x9f: /* lahf */
         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
         gen_compute_eflags(s);
         /* Note: gen_compute_eflags() only gives the condition codes */
-        tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
-        gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
+        tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
+        gen_op_mov_reg_v(MO_8, R_AH, s->T0);
         break;
     case 0xf5: /* cmc */
         gen_compute_eflags(s);
@@ -6732,10 +6746,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             s->rip_offset = 1;
             gen_lea_modrm(env, s, modrm);
             if (!(s->prefix & PREFIX_LOCK)) {
-                gen_op_ld_v(s, ot, cpu_T0, s->A0);
+                gen_op_ld_v(s, ot, s->T0, s->A0);
             }
         } else {
-            gen_op_mov_v_reg(ot, cpu_T0, rm);
+            gen_op_mov_v_reg(ot, s->T0, rm);
         }
         /* load shift */
         val = x86_ldub_code(env, s);
@@ -6771,10 +6785,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), cpu_tmp0);
             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
             if (!(s->prefix & PREFIX_LOCK)) {
-                gen_op_ld_v(s, ot, cpu_T0, s->A0);
+                gen_op_ld_v(s, ot, s->T0, s->A0);
             }
         } else {
-            gen_op_mov_v_reg(ot, cpu_T0, rm);
+            gen_op_mov_v_reg(ot, s->T0, rm);
         }
     bt_op:
         tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
@@ -6785,46 +6799,46 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             case 0: /* bt */
                 /* Needs no atomic ops; we surpressed the normal
                    memory load for LOCK above so do it now.  */
-                gen_op_ld_v(s, ot, cpu_T0, s->A0);
+                gen_op_ld_v(s, ot, s->T0, s->A0);
                 break;
             case 1: /* bts */
-                tcg_gen_atomic_fetch_or_tl(cpu_T0, s->A0, cpu_tmp0,
+                tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, cpu_tmp0,
                                            s->mem_index, ot | MO_LE);
                 break;
             case 2: /* btr */
                 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
-                tcg_gen_atomic_fetch_and_tl(cpu_T0, s->A0, cpu_tmp0,
+                tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, cpu_tmp0,
                                             s->mem_index, ot | MO_LE);
                 break;
            default:
            case 3: /* btc */
-                tcg_gen_atomic_fetch_xor_tl(cpu_T0, s->A0, cpu_tmp0,
+                tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, cpu_tmp0,
                                             s->mem_index, ot | MO_LE);
                 break;
             }
-            tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
+            tcg_gen_shr_tl(cpu_tmp4, s->T0, cpu_T1);
         } else {
-            tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
+            tcg_gen_shr_tl(cpu_tmp4, s->T0, cpu_T1);
             switch (op) {
             case 0: /* bt */
                 /* Data already loaded; nothing to do.  */
                 break;
             case 1: /* bts */
-                tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
+                tcg_gen_or_tl(s->T0, s->T0, cpu_tmp0);
                 break;
             case 2: /* btr */
-                tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
+                tcg_gen_andc_tl(s->T0, s->T0, cpu_tmp0);
                 break;
             default:
             case 3: /* btc */
-                tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
+                tcg_gen_xor_tl(s->T0, s->T0, cpu_tmp0);
                 break;
             }
             if (op != 0) {
                 if (mod != 3) {
-                    gen_op_st_v(s, ot, cpu_T0, s->A0);
+                    gen_op_st_v(s, ot, s->T0, s->A0);
                 } else {
-                    gen_op_mov_reg_v(ot, rm, cpu_T0);
+                    gen_op_mov_reg_v(ot, rm, s->T0);
                 }
             }
         }
@@ -6865,7 +6879,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         modrm = x86_ldub_code(env, s);
         reg = ((modrm >> 3) & 7) | rex_r;
         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
-        gen_extu(ot, cpu_T0);
+        gen_extu(ot, s->T0);

         /* Note that lzcnt and tzcnt are in different extensions.  */
         if ((prefixes & PREFIX_REPZ)
@@ -6874,23 +6888,23 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
             int size = 8 << ot;
             /* For lzcnt/tzcnt, C bit is defined related to the input. */
-            tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+            tcg_gen_mov_tl(cpu_cc_src, s->T0);
             if (b & 1) {
                 /* For lzcnt, reduce the target_ulong result by the
                    number of zeros that we expect to find at the top.  */
-                tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
-                tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
+                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
+                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
             } else {
                 /* For tzcnt, a zero input must return the operand size.  */
-                tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
+                tcg_gen_ctzi_tl(s->T0, s->T0, size);
             }
             /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
-            gen_op_update1_cc();
+            gen_op_update1_cc(s);
             set_cc_op(s, CC_OP_BMILGB + ot);
         } else {
             /* For bsr/bsf, only the Z bit is defined and it is related
                to the input and not the result.  */
-            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
             set_cc_op(s, CC_OP_LOGICB + ot);

             /* ??? The manual says that the output is undefined when the
@@ -6901,13 +6915,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             if (b & 1) {
                 /* For bsr, return the bit index of the first 1 bit,
                    not the count of leading zeros.  */
                 tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
-                tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
-                tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
+                tcg_gen_clz_tl(s->T0, s->T0, cpu_T1);
+                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
             } else {
-                tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
+                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
             }
         }
-        gen_op_mov_reg_v(ot, reg, cpu_T0);
+        gen_op_mov_reg_v(ot, reg, s->T0);
         break;
         /************************/
         /* bcd */
@@ -7047,9 +7061,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         mod = (modrm >> 6) & 3;
         if (mod == 3)
             goto illegal_op;
-        gen_op_mov_v_reg(ot, cpu_T0, reg);
+        gen_op_mov_v_reg(ot, s->T0, reg);
         gen_lea_modrm(env, s, modrm);
-        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
         if (ot == MO_16) {
             gen_helper_boundw(cpu_env, s->A0, cpu_tmp2_i32);
         } else {
@@ -7060,24 +7074,24 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         reg = (b & 7) | REX_B(s);
 #ifdef TARGET_X86_64
         if (dflag == MO_64) {
-            gen_op_mov_v_reg(MO_64, cpu_T0, reg);
-            tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
-            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
+            gen_op_mov_v_reg(MO_64, s->T0, reg);
+            tcg_gen_bswap64_i64(s->T0, s->T0);
+            gen_op_mov_reg_v(MO_64, reg, s->T0);
         } else
 #endif
         {
-            gen_op_mov_v_reg(MO_32, cpu_T0, reg);
-            tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
-            tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
-            gen_op_mov_reg_v(MO_32, reg, cpu_T0);
+            gen_op_mov_v_reg(MO_32, s->T0, reg);
+            tcg_gen_ext32u_tl(s->T0, s->T0);
+            tcg_gen_bswap32_tl(s->T0, s->T0);
+            gen_op_mov_reg_v(MO_32, reg, s->T0);
         }
         break;
     case 0xd6: /* salc */
         if (CODE64(s))
             goto illegal_op;
-        gen_compute_eflags_c(s, cpu_T0);
-        tcg_gen_neg_tl(cpu_T0, cpu_T0);
-        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
+        gen_compute_eflags_c(s, s->T0);
+        tcg_gen_neg_tl(s->T0, s->T0);
+        gen_op_mov_reg_v(MO_8, R_EAX, s->T0);
         break;
     case 0xe0: /* loopnz */
     case 0xe1: /* loopz */
@@ -7229,7 +7243,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             if (!s->pe || s->vm86)
                 goto illegal_op;
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
-            tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+            tcg_gen_ld32u_tl(s->T0, cpu_env,
                              offsetof(CPUX86State, ldt.selector));
             ot = mod == 3 ? dflag : MO_16;
             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
@@ -7242,7 +7256,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             } else {
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
-                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
             }
             break;
@@ -7250,7 +7264,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             if (!s->pe || s->vm86)
                 goto illegal_op;
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
-            tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+            tcg_gen_ld32u_tl(s->T0, cpu_env,
                              offsetof(CPUX86State, tr.selector));
             ot = mod == 3 ? dflag : MO_16;
             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
@@ -7263,7 +7277,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             } else {
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
-                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
                 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
             }
             break;
@@ -7274,9 +7288,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
             gen_update_cc_op(s);
             if (op == 4) {
-                gen_helper_verr(cpu_env, cpu_T0);
+                gen_helper_verr(cpu_env, s->T0);
             } else {
-                gen_helper_verw(cpu_env, cpu_T0);
+                gen_helper_verw(cpu_env, s->T0);
             }
             set_cc_op(s, CC_OP_EFLAGS);
             break;
@@ -7291,15 +7305,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         CASE_MODRM_MEM_OP(0): /* sgdt */
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
             gen_lea_modrm(env, s, modrm);
-            tcg_gen_ld32u_tl(cpu_T0,
+            tcg_gen_ld32u_tl(s->T0,
                              cpu_env, offsetof(CPUX86State, gdt.limit));
-            gen_op_st_v(s, MO_16, cpu_T0, s->A0);
+            gen_op_st_v(s, MO_16, s->T0, s->A0);
             gen_add_A0_im(s, 2);
-            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
             if (dflag == MO_16) {
-                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
             }
-            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
+            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
             break;

         case 0xc8: /* monitor */
@@ -7347,14 +7361,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         CASE_MODRM_MEM_OP(1): /* sidt */
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
             gen_lea_modrm(env, s, modrm);
-            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
-            gen_op_st_v(s, MO_16, cpu_T0, s->A0);
+            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
+            gen_op_st_v(s, MO_16, s->T0, s->A0);
             gen_add_A0_im(s, 2);
-            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
             if (dflag == MO_16) {
-                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
             }
-            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
+            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
             break;

         case 0xd0: /* xgetbv */
@@ -7500,11 +7514,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_lea_modrm(env, s, modrm);
             gen_op_ld_v(s, MO_16, cpu_T1, s->A0);
             gen_add_A0_im(s, 2);
-            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
+            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
             if (dflag == MO_16) {
-                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
             }
-            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
             tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
             break;

@@ -7517,17 +7531,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_lea_modrm(env, s, modrm);
             gen_op_ld_v(s, MO_16, cpu_T1, s->A0);
             gen_add_A0_im(s, 2);
-            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, s->A0);
+            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
             if (dflag == MO_16) {
-                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
             }
-            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
             tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
             break;

         CASE_MODRM_OP(4): /* smsw */
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
-            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
+            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
             if (CODE64(s)) {
                 mod = (modrm >> 6) & 3;
                 ot = (mod != 3 ? MO_16 : s->dflag);
@@ -7560,7 +7574,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             }
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
-            gen_helper_lmsw(cpu_env, cpu_T0);
+            gen_helper_lmsw(cpu_env, s->T0);
             gen_jmp_im(s->pc - s->cs_base);
             gen_eob(s);
             break;
@@ -7584,10 +7598,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             if (s->cpl != 0) {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             } else {
-                tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
+                tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                               offsetof(CPUX86State, kernelgsbase));
-                tcg_gen_st_tl(cpu_T0, cpu_env,
+                tcg_gen_st_tl(s->T0, cpu_env,
                               offsetof(CPUX86State, kernelgsbase));
             }
             break;
@@ -7638,16 +7652,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             rm = (modrm & 7) | REX_B(s);

             if (mod == 3) {
-                gen_op_mov_v_reg(MO_32, cpu_T0, rm);
+                gen_op_mov_v_reg(MO_32, s->T0, rm);
                 /* sign extend */
                 if (d_ot == MO_64) {
-                    tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+                    tcg_gen_ext32s_tl(s->T0, s->T0);
                 }
-                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+                gen_op_mov_reg_v(d_ot, reg, s->T0);
             } else {
                 gen_lea_modrm(env, s, modrm);
-                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, s->A0);
-                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
+                gen_op_mov_reg_v(d_ot, reg, s->T0);
             }
         } else
 #endif
@@ -7712,9 +7726,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             t0 = tcg_temp_local_new();
             gen_update_cc_op(s);
             if (b == 0x102) {
-                gen_helper_lar(t0, cpu_env, cpu_T0);
+                gen_helper_lar(t0, cpu_env, s->T0);
             } else {
-                gen_helper_lsl(t0, cpu_env, cpu_T0);
+                gen_helper_lsl(t0, cpu_env, s->T0);
             }
             tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
             label1 = gen_new_label();
@@ -7816,16 +7830,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             }
             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
             if (a.index >= 0) {
-                tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+                tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
             } else {
-                tcg_gen_movi_tl(cpu_T0, 0);
+                tcg_gen_movi_tl(s->T0, 0);
             }
             if (CODE64(s)) {
-                gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, cpu_T0);
+                gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
                 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
             } else {
-                gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, cpu_T0);
+                gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
                 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
             }
@@ -7921,15 +7935,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             }
             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
             if (a.index >= 0) {
-                tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+                tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
             } else {
-                tcg_gen_movi_tl(cpu_T0, 0);
+                tcg_gen_movi_tl(s->T0, 0);
             }
             if (CODE64(s)) {
-                gen_helper_bndstx64(cpu_env, s->A0, cpu_T0,
+                gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                     cpu_bndl[reg], cpu_bndu[reg]);
             } else {
-                gen_helper_bndstx32(cpu_env, s->A0, cpu_T0,
+                gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                     cpu_bndl[reg], cpu_bndu[reg]);
             }
         }
@@ -7973,9 +7987,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_start();
                 }
-                gen_op_mov_v_reg(ot, cpu_T0, rm);
+                gen_op_mov_v_reg(ot, s->T0, rm);
                 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
-                                     cpu_T0);
+                                     s->T0);
                 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_end();
                 }
@@ -7985,8 +7999,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_start();
                 }
-                gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
-                gen_op_mov_reg_v(ot, rm, cpu_T0);
+                gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg));
+                gen_op_mov_reg_v(ot, rm, s->T0);
                 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                     gen_io_end();
                 }
@@ -8019,16 +8033,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             }
             if (b & 2) {
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
-                gen_op_mov_v_reg(ot, cpu_T0, rm);
+                gen_op_mov_v_reg(ot, s->T0, rm);
                 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
-                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
+                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, s->T0);
                 gen_jmp_im(s->pc - s->cs_base);
                 gen_eob(s);
             } else {
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
-                gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
-                gen_op_mov_reg_v(ot, rm, cpu_T0);
+                gen_helper_get_dr(s->T0, cpu_env, cpu_tmp2_i32);
+                gen_op_mov_reg_v(ot, rm, s->T0);
             }
         }
         break;
@@ -8107,8 +8121,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
                 break;
             }
             gen_lea_modrm(env, s, modrm);
-            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
-            gen_op_st_v(s, MO_32, cpu_T0, s->A0);
+            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
+            gen_op_st_v(s, MO_32, s->T0, s->A0);
             break;

         CASE_MODRM_MEM_OP(4): /* xsave */
@@ -8287,10 +8301,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         }

         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
-        gen_extu(ot, cpu_T0);
-        tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
-        tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
-        gen_op_mov_reg_v(ot, reg, cpu_T0);
+        gen_extu(ot, s->T0);
+        tcg_gen_mov_tl(cpu_cc_src, s->T0);
+        tcg_gen_ctpop_tl(s->T0, s->T0);
+        gen_op_mov_reg_v(ot, reg, s->T0);

         set_cc_op(s, CC_OP_POPCNT);
         break;
@@ -8456,7 +8470,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
         printf("ERROR addseg\n");
 #endif

-    cpu_T0 = tcg_temp_new();
+    dc->T0 = tcg_temp_new();
     cpu_T1 = tcg_temp_new();
     dc->A0 = tcg_temp_new();

From b48597b0eda32d4c7ade2ba3f98f06f62289e3e2 Mon Sep 17 00:00:00 2001
From: "Emilio G. Cota"
Date: Tue, 11 Sep 2018 14:50:46 -0400
Subject: [PATCH 28/80] target/i386: move cpu_T1 to DisasContext

Signed-off-by: Emilio G. Cota
Reviewed-by: Richard Henderson
Signed-off-by: Paolo Bonzini
---
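For readers following the series: this is the same mechanical conversion
that the previous patch applied to cpu_T0. A minimal sketch of the pattern,
assuming the surrounding QEMU definitions (TCGv, DisasContext,
tcg_temp_new); it is illustrative only and is not part of the applied diff:

    /* Before: one mutable file-scope temporary, shared by the whole
       translator. */
    static TCGv cpu_T1;

    /* After: the temporary becomes per-translation state. */
    typedef struct DisasContext {
        /* ... existing fields ... */
        TCGv T0;
        TCGv T1;    /* moved here from the file-scope cpu_T1 */
    } DisasContext;

    /* ... and is allocated when translation of a block starts.  The
       container_of line is assumed from the surrounding code, which this
       patch does not reproduce in full. */
    static void i386_tr_init_disas_context(DisasContextBase *dcbase,
                                           CPUState *cpu)
    {
        DisasContext *dc = container_of(dcbase, DisasContext, base);
        /* ... */
        dc->T0 = tcg_temp_new();
        dc->T1 = tcg_temp_new();    /* was: cpu_T1 = tcg_temp_new(); */
        dc->A0 = tcg_temp_new();
    }

Every use site then changes from cpu_T1 to s->T1 (or s1->T1), as the hunks
below show; eliminating the file-scope temporaries removes hidden global
state from the i386 translator.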
 target/i386/translate.c | 341 ++++++++++++++++++++--------------------
 1 file changed, 170 insertions(+), 171 deletions(-)

diff --git a/target/i386/translate.c b/target/i386/translate.c
index 73fd7e5b9a..bd27e65344 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -78,8 +78,6 @@ static TCGv cpu_regs[CPU_NB_REGS];
 static TCGv cpu_seg_base[6];
 static TCGv_i64 cpu_bndl[4];
 static TCGv_i64 cpu_bndu[4];
-/* local temps */
-static TCGv cpu_T1;
 /* local register indexes (only used inside old micro ops) */
 static TCGv cpu_tmp0, cpu_tmp4;
 static TCGv_ptr cpu_ptr0, cpu_ptr1;
@@ -139,6 +137,7 @@ typedef struct DisasContext {
     TCGv cc_srcT;
     TCGv A0;
     TCGv T0;
+    TCGv T1;

     sigjmp_buf jmpbuf;
 } DisasContext;
@@ -656,20 +655,20 @@ static void gen_op_update1_cc(DisasContext *s)

 static void gen_op_update2_cc(DisasContext *s)
 {
-    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+    tcg_gen_mov_tl(cpu_cc_src, s->T1);
     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
 }

 static void gen_op_update3_cc(DisasContext *s, TCGv reg)
 {
     tcg_gen_mov_tl(cpu_cc_src2, reg);
-    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+    tcg_gen_mov_tl(cpu_cc_src, s->T1);
     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
 }

 static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
 {
-    tcg_gen_and_tl(cpu_cc_dst, s->T0, cpu_T1);
+    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
 }

 static void gen_op_update_neg_cc(DisasContext *s)
@@ -1090,7 +1089,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot)
 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
 {
     gen_string_movl_A0_EDI(s);
-    gen_op_ld_v(s, ot, cpu_T1, s->A0);
+    gen_op_ld_v(s, ot, s->T1, s->A0);
     gen_op(s, OP_CMPL, ot, R_EAX);
     gen_op_movl_T0_Dshift(s, ot);
     gen_op_add_reg_T0(s, s->aflag, R_EDI);
@@ -1099,7 +1098,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot)
 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
 {
     gen_string_movl_A0_EDI(s);
-    gen_op_ld_v(s, ot, cpu_T1, s->A0);
+    gen_op_ld_v(s, ot, s->T1, s->A0);
     gen_string_movl_A0_ESI(s);
     gen_op(s, OP_CMPL, ot, OR_TMP0);
     gen_op_movl_T0_Dshift(s, ot);
@@ -1274,11 +1273,11 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
     case OP_ADCL:
         gen_compute_eflags_c(s1, cpu_tmp4);
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_add_tl(s1->T0, cpu_tmp4, cpu_T1);
+            tcg_gen_add_tl(s1->T0, cpu_tmp4, s1->T1);
             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                         s1->mem_index, ot | MO_LE);
         } else {
-            tcg_gen_add_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
             tcg_gen_add_tl(s1->T0, s1->T0, cpu_tmp4);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
@@ -1288,12 +1287,12 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
     case OP_SBBL:
         gen_compute_eflags_c(s1, cpu_tmp4);
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_add_tl(s1->T0, cpu_T1, cpu_tmp4);
+            tcg_gen_add_tl(s1->T0, s1->T1, cpu_tmp4);
             tcg_gen_neg_tl(s1->T0, s1->T0);
             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                         s1->mem_index, ot | MO_LE);
         } else {
-            tcg_gen_sub_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
             tcg_gen_sub_tl(s1->T0, s1->T0, cpu_tmp4);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
@@ -1302,10 +1301,10 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
         break;
     case OP_ADDL:
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, cpu_T1,
+            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                         s1->mem_index, ot | MO_LE);
         } else {
-            tcg_gen_add_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
         gen_op_update2_cc(s1);
@@ -1313,13 +1312,13 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
         break;
     case OP_SUBL:
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_neg_tl(s1->T0, cpu_T1);
+            tcg_gen_neg_tl(s1->T0, s1->T1);
             tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                         s1->mem_index, ot | MO_LE);
-            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, cpu_T1);
+            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
         } else {
             tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
-            tcg_gen_sub_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
         gen_op_update2_cc(s1);
@@ -1328,10 +1327,10 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
     default:
     case OP_ANDL:
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, cpu_T1,
+            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                         s1->mem_index, ot | MO_LE);
         } else {
-            tcg_gen_and_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
         gen_op_update1_cc(s1);
@@ -1339,10 +1338,10 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
         break;
     case OP_ORL:
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, cpu_T1,
+            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
         } else {
-            tcg_gen_or_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
         gen_op_update1_cc(s1);
@@ -1350,19 +1349,19 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
         break;
     case OP_XORL:
         if (s1->prefix & PREFIX_LOCK) {
-            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, cpu_T1,
+            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                         s1->mem_index, ot | MO_LE);
         } else {
-            tcg_gen_xor_tl(s1->T0, s1->T0, cpu_T1);
+            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
             gen_op_st_rm_T0_A0(s1, ot, d);
         }
         gen_op_update1_cc(s1);
         set_cc_op(s1, CC_OP_LOGICB + ot);
         break;
     case OP_CMPL:
-        tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
         tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
-        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, cpu_T1);
+        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
         set_cc_op(s1, CC_OP_SUBB + ot);
         break;
     }
@@ -1447,28 +1446,28 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
         gen_op_mov_v_reg(ot, s->T0, op1);
     }

-    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
-    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
+    tcg_gen_andi_tl(s->T1, s->T1, mask);
+    tcg_gen_subi_tl(cpu_tmp0, s->T1, 1);

     if (is_right) {
         if (is_arith) {
             gen_exts(ot, s->T0);
             tcg_gen_sar_tl(cpu_tmp0, s->T0, cpu_tmp0);
-            tcg_gen_sar_tl(s->T0, s->T0, cpu_T1);
+            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
         } else {
             gen_extu(ot, s->T0);
             tcg_gen_shr_tl(cpu_tmp0, s->T0, cpu_tmp0);
-            tcg_gen_shr_tl(s->T0, s->T0, cpu_T1);
+            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
         }
     } else {
         tcg_gen_shl_tl(cpu_tmp0, s->T0, cpu_tmp0);
-        tcg_gen_shl_tl(s->T0, s->T0, cpu_T1);
+        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
     }

     /* store */
     gen_op_st_rm_T0_A0(s, ot, op1);

-    gen_shift_flags(s, ot, s->T0, cpu_tmp0, cpu_T1, is_right);
+    gen_shift_flags(s, ot, s->T0, cpu_tmp0, s->T1, is_right);
 }

 static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
@@ -1523,7 +1522,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
         gen_op_mov_v_reg(ot, s->T0, op1);
     }

-    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+    tcg_gen_andi_tl(s->T1, s->T1, mask);

     switch (ot) {
     case MO_8:
@@ -1539,7 +1538,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
 #ifdef TARGET_X86_64
     case MO_32:
         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0);
-        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1);
         if (is_right) {
             tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
         } else {
@@ -1550,9 +1549,9 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
 #endif
     default:
         if (is_right) {
-            tcg_gen_rotr_tl(s->T0, s->T0, cpu_T1);
+            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
         } else {
-            tcg_gen_rotl_tl(s->T0, s->T0, cpu_T1);
+            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
         }
         break;
     }
@@ -1584,7 +1583,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
        exactly as we computed above.  */
     t0 = tcg_const_i32(0);
     t1 = tcg_temp_new_i32();
-    tcg_gen_trunc_tl_i32(t1, cpu_T1);
+    tcg_gen_trunc_tl_i32(t1, s->T1);
     tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
     tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
@@ -1689,17 +1688,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
     if (is_right) {
         switch (ot) {
         case MO_8:
-            gen_helper_rcrb(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
             break;
         case MO_16:
-            gen_helper_rcrw(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
             break;
         case MO_32:
-            gen_helper_rcrl(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
             break;
 #ifdef TARGET_X86_64
         case MO_64:
-            gen_helper_rcrq(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
             break;
 #endif
         default:
@@ -1708,17 +1707,17 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
     } else {
         switch (ot) {
         case MO_8:
-            gen_helper_rclb(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
             break;
         case MO_16:
-            gen_helper_rclw(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
             break;
         case MO_32:
-            gen_helper_rcll(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
             break;
 #ifdef TARGET_X86_64
         case MO_64:
-            gen_helper_rclq(s->T0, cpu_env, s->T0, cpu_T1);
+            gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
             break;
 #endif
         default:
@@ -1752,11 +1751,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
            portion by constructing it as a 32-bit value.  */
         if (is_right) {
-            tcg_gen_deposit_tl(cpu_tmp0, s->T0, cpu_T1, 16, 16);
-            tcg_gen_mov_tl(cpu_T1, s->T0);
+            tcg_gen_deposit_tl(cpu_tmp0, s->T0, s->T1, 16, 16);
+            tcg_gen_mov_tl(s->T1, s->T0);
             tcg_gen_mov_tl(s->T0, cpu_tmp0);
         } else {
-            tcg_gen_deposit_tl(cpu_T1, s->T0, cpu_T1, 16, 16);
+            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
         }
         /* FALLTHRU */
 #ifdef TARGET_X86_64
@@ -1764,11 +1763,11 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
         tcg_gen_subi_tl(cpu_tmp0, count, 1);
         if (is_right) {
-            tcg_gen_concat_tl_i64(s->T0, s->T0, cpu_T1);
+            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
             tcg_gen_shr_i64(cpu_tmp0, s->T0, cpu_tmp0);
             tcg_gen_shr_i64(s->T0, s->T0, count);
         } else {
-            tcg_gen_concat_tl_i64(s->T0, cpu_T1, s->T0);
+            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
             tcg_gen_shl_i64(cpu_tmp0, s->T0, cpu_tmp0);
             tcg_gen_shl_i64(s->T0, s->T0, count);
             tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
@@ -1783,24 +1782,24 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,

                 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
                 tcg_gen_shr_tl(s->T0, s->T0, count);
-                tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
+                tcg_gen_shl_tl(s->T1, s->T1, cpu_tmp4);
             } else {
                 tcg_gen_shl_tl(cpu_tmp0, s->T0, cpu_tmp0);
                 if (ot == MO_16) {
                     /* Only needed if count > 16, for Intel behaviour.  */
                     tcg_gen_subfi_tl(cpu_tmp4, 33, count);
-                    tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
+                    tcg_gen_shr_tl(cpu_tmp4, s->T1, cpu_tmp4);
                     tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
                 }

                 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
                 tcg_gen_shl_tl(s->T0, s->T0, count);
-                tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
+                tcg_gen_shr_tl(s->T1, s->T1, cpu_tmp4);
             }
             tcg_gen_movi_tl(cpu_tmp4, 0);
-            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
-                               cpu_tmp4, cpu_T1);
-            tcg_gen_or_tl(s->T0, s->T0, cpu_T1);
+            tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, cpu_tmp4,
+                               cpu_tmp4, s->T1);
+            tcg_gen_or_tl(s->T0, s->T0, s->T1);
             break;
         }
@@ -1814,7 +1813,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
 static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
 {
     if (s != OR_TMP1)
-        gen_op_mov_v_reg(ot, cpu_T1, s);
+        gen_op_mov_v_reg(ot, s1->T1, s);
     switch(op) {
     case OP_ROL:
         gen_rot_rm_T1(s1, ot, d, 0);
@@ -1862,7 +1861,7 @@ static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
         break;
     default:
         /* currently not optimized */
-        tcg_gen_movi_tl(cpu_T1, c);
+        tcg_gen_movi_tl(s1->T1, c);
         gen_shift(s1, op, ot, d, OR_TMP1);
         break;
     }
@@ -2242,7 +2241,7 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,

     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

-    cc = gen_prepare_cc(s, b, cpu_T1);
+    cc = gen_prepare_cc(s, b, s->T1);
     if (cc.mask != -1) {
         TCGv t0 = tcg_temp_new();
         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
@@ -2416,8 +2415,8 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
     int size = 1 << d_ot;

     /* Push BP; compute FrameTemp into T1.  */
-    tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
-    gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
+    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
+    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

     level &= 31;
@@ -2430,23 +2429,23 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
             gen_op_ld_v(s, d_ot, cpu_tmp0, s->A0);

-            tcg_gen_subi_tl(s->A0, cpu_T1, size * i);
+            tcg_gen_subi_tl(s->A0, s->T1, size * i);
             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
             gen_op_st_v(s, d_ot, cpu_tmp0, s->A0);
         }

         /* Push the current FrameTemp as the last level.  */
-        tcg_gen_subi_tl(s->A0, cpu_T1, size * level);
+        tcg_gen_subi_tl(s->A0, s->T1, size * level);
         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
-        gen_op_st_v(s, d_ot, cpu_T1, s->A0);
+        gen_op_st_v(s, d_ot, s->T1, s->A0);
     }

     /* Copy the FrameTemp value to EBP.  */
-    gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
+    gen_op_mov_reg_v(a_ot, R_EBP, s->T1);

     /* Compute the final value of ESP.  */
*/ - tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level); - gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1); + tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level); + gen_op_mov_reg_v(a_ot, R_ESP, s->T1); } static void gen_leave(DisasContext *s) @@ -2457,10 +2456,10 @@ static void gen_leave(DisasContext *s) gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); - tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot); + tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot); gen_op_mov_reg_v(d_ot, R_EBP, s->T0); - gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1); + gen_op_mov_reg_v(a_ot, R_ESP, s->T1); } static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) @@ -3854,10 +3853,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound); tcg_temp_free(bound); - tcg_gen_movi_tl(cpu_T1, 1); - tcg_gen_shl_tl(cpu_T1, cpu_T1, s->A0); - tcg_gen_subi_tl(cpu_T1, cpu_T1, 1); - tcg_gen_and_tl(s->T0, s->T0, cpu_T1); + tcg_gen_movi_tl(s->T1, 1); + tcg_gen_shl_tl(s->T1, s->T1, s->A0); + tcg_gen_subi_tl(s->T1, s->T1, 1); + tcg_gen_and_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(ot, reg, s->T0); gen_op_update1_cc(s); @@ -3873,19 +3872,19 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]); { TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31); /* Note that since we're using BMILG (in order to get O cleared) we need to store the inverse into C. */ tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, - cpu_T1, bound); - tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1, - bound, bound, cpu_T1); + s->T1, bound); + tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, + bound, bound, s->T1); tcg_temp_free(bound); } tcg_gen_movi_tl(s->A0, -1); - tcg_gen_shl_tl(s->A0, s->A0, cpu_T1); + tcg_gen_shl_tl(s->A0, s->A0, s->T1); tcg_gen_andc_tl(s->T0, s->T0, s->A0); gen_op_mov_reg_v(ot, reg, s->T0); gen_op_update1_cc(s); @@ -3911,10 +3910,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; #ifdef TARGET_X86_64 case MO_64: - tcg_gen_mulu2_i64(s->T0, cpu_T1, + tcg_gen_mulu2_i64(s->T0, s->T1, s->T0, cpu_regs[R_EDX]); tcg_gen_mov_i64(cpu_regs[s->vex_v], s->T0); - tcg_gen_mov_i64(cpu_regs[reg], cpu_T1); + tcg_gen_mov_i64(cpu_regs[reg], s->T1); break; #endif } @@ -3931,11 +3930,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Note that by zero-extending the mask operand, we automatically handle zero-extending the result. */ if (ot == MO_64) { - tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_mov_tl(s->T1, cpu_regs[s->vex_v]); } else { - tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_ext32u_tl(s->T1, cpu_regs[s->vex_v]); } - gen_helper_pdep(cpu_regs[reg], s->T0, cpu_T1); + gen_helper_pdep(cpu_regs[reg], s->T0, s->T1); break; case 0x2f5: /* pext Gy, By, Ey */ @@ -3949,11 +3948,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, /* Note that by zero-extending the mask operand, we automatically handle zero-extending the result. 
*/ if (ot == MO_64) { - tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_mov_tl(s->T1, cpu_regs[s->vex_v]); } else { - tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]); + tcg_gen_ext32u_tl(s->T1, cpu_regs[s->vex_v]); } - gen_helper_pext(cpu_regs[reg], s->T0, cpu_T1); + gen_helper_pext(cpu_regs[reg], s->T0, s->T1); break; case 0x1f6: /* adcx Gy, Ey */ @@ -4045,22 +4044,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (ot == MO_64) { - tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63); + tcg_gen_andi_tl(s->T1, cpu_regs[s->vex_v], 63); } else { - tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31); + tcg_gen_andi_tl(s->T1, cpu_regs[s->vex_v], 31); } if (b == 0x1f7) { - tcg_gen_shl_tl(s->T0, s->T0, cpu_T1); + tcg_gen_shl_tl(s->T0, s->T0, s->T1); } else if (b == 0x2f7) { if (ot != MO_64) { tcg_gen_ext32s_tl(s->T0, s->T0); } - tcg_gen_sar_tl(s->T0, s->T0, cpu_T1); + tcg_gen_sar_tl(s->T0, s->T0, s->T1); } else { if (ot != MO_64) { tcg_gen_ext32u_tl(s->T0, s->T0); } - tcg_gen_shr_tl(s->T0, s->T0, cpu_T1); + tcg_gen_shr_tl(s->T0, s->T0, s->T1); } gen_op_mov_reg_v(ot, reg, s->T0); break; @@ -4080,16 +4079,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_mov_tl(cpu_cc_src, s->T0); switch (reg & 7) { case 1: /* blsr By,Ey */ - tcg_gen_subi_tl(cpu_T1, s->T0, 1); - tcg_gen_and_tl(s->T0, s->T0, cpu_T1); + tcg_gen_subi_tl(s->T1, s->T0, 1); + tcg_gen_and_tl(s->T0, s->T0, s->T1); break; case 2: /* blsmsk By,Ey */ - tcg_gen_subi_tl(cpu_T1, s->T0, 1); - tcg_gen_xor_tl(s->T0, s->T0, cpu_T1); + tcg_gen_subi_tl(s->T1, s->T0, 1); + tcg_gen_xor_tl(s->T0, s->T0, s->T1); break; case 3: /* blsi By, Ey */ - tcg_gen_neg_tl(cpu_T1, s->T0); - tcg_gen_and_tl(s->T0, s->T0, cpu_T1); + tcg_gen_neg_tl(s->T1, s->T0); + tcg_gen_and_tl(s->T0, s->T0, s->T1); break; default: goto unknown_op; @@ -4677,7 +4676,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { opreg = rm; } - gen_op_mov_v_reg(ot, cpu_T1, reg); + gen_op_mov_v_reg(ot, s->T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ @@ -4687,17 +4686,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { - gen_op_mov_v_reg(ot, cpu_T1, rm); + gen_op_mov_v_reg(ot, s->T1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); gen_op(s, op, ot, OR_EAX); break; } @@ -4743,7 +4742,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = (int8_t)insn_get(env, s, MO_8); break; } - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); gen_op(s, op, ot, opreg); } break; @@ -4783,7 +4782,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(op) { case 0: /* test */ val = insn_get(env, s, ot); - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -4847,22 +4846,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 4: /* mul */ switch(ot) { case MO_8: - gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); + gen_op_mov_v_reg(MO_8, s->T1, R_EAX); tcg_gen_ext8u_tl(s->T0, s->T0); - tcg_gen_ext8u_tl(cpu_T1, cpu_T1); + tcg_gen_ext8u_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ 
- tcg_gen_mul_tl(s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: - gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); + gen_op_mov_v_reg(MO_16, s->T1, R_EAX); tcg_gen_ext16u_tl(s->T0, s->T0); - tcg_gen_ext16u_tl(cpu_T1, cpu_T1); + tcg_gen_ext16u_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_shri_tl(s->T0, s->T0, 16); @@ -4896,11 +4895,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 5: /* imul */ switch(ot) { case MO_8: - gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); + gen_op_mov_v_reg(MO_8, s->T1, R_EAX); tcg_gen_ext8s_tl(s->T0, s->T0); - tcg_gen_ext8s_tl(cpu_T1, cpu_T1); + tcg_gen_ext8s_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_ext8s_tl(cpu_tmp0, s->T0); @@ -4908,11 +4907,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) set_cc_op(s, CC_OP_MULB); break; case MO_16: - gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); + gen_op_mov_v_reg(MO_16, s->T1, R_EAX); tcg_gen_ext16s_tl(s->T0, s->T0); - tcg_gen_ext16s_tl(cpu_T1, cpu_T1); + tcg_gen_ext16s_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(cpu_tmp0, s->T0); @@ -5041,25 +5040,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext16u_tl(s->T0, s->T0); } next_eip = s->pc - s->cs_base; - tcg_gen_movi_tl(cpu_T1, next_eip); - gen_push_v(s, cpu_T1); + tcg_gen_movi_tl(s->T1, next_eip); + gen_push_v(s, s->T1); gen_op_jmp_v(s->T0); gen_bnd_jmp(s); gen_jr(s, s->T0); break; case 3: /* lcall Ev */ - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1, + gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, s->T1, tcg_const_i32(dflag - 1), tcg_const_tl(s->pc - s->cs_base)); } else { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1, + gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, s->T1, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); } @@ -5075,17 +5074,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_jr(s, s->T0); break; case 5: /* ljmp Ev */ - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1, + gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, s->T1, tcg_const_tl(s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(s, R_CS); - gen_op_jmp_v(cpu_T1); + gen_op_jmp_v(s->T1); } tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); @@ -5106,7 +5105,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; 
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_op_mov_v_reg(ot, cpu_T1, reg); + gen_op_mov_v_reg(ot, s->T1, reg); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -5117,7 +5116,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = insn_get(env, s, ot); gen_op_mov_v_reg(ot, s->T0, OR_EAX); - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -5183,25 +5182,25 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, MO_8); - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); } else { - gen_op_mov_v_reg(ot, cpu_T1, reg); + gen_op_mov_v_reg(ot, s->T1, reg); } switch (ot) { #ifdef TARGET_X86_64 case MO_64: - tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, s->T0, cpu_T1); + tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63); - tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1); + tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1); break; #endif case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); @@ -5212,9 +5211,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; default: tcg_gen_ext16s_tl(s->T0, s->T0); - tcg_gen_ext16s_tl(cpu_T1, cpu_T1); + tcg_gen_ext16s_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(s->T0, s->T0, cpu_T1); + tcg_gen_mul_tl(s->T0, s->T0, s->T1); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(cpu_tmp0, s->T0); tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0); @@ -5232,22 +5231,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(ot, s->T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(ot, cpu_T1, rm); - tcg_gen_add_tl(s->T0, s->T0, cpu_T1); - gen_op_mov_reg_v(ot, reg, cpu_T1); + gen_op_mov_v_reg(ot, s->T1, rm); + tcg_gen_add_tl(s->T0, s->T0, s->T1); + gen_op_mov_reg_v(ot, reg, s->T1); gen_op_mov_reg_v(ot, rm, s->T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_fetch_add_tl(cpu_T1, s->A0, s->T0, + tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0, s->mem_index, ot | MO_LE); - tcg_gen_add_tl(s->T0, s->T0, cpu_T1); + tcg_gen_add_tl(s->T0, s->T0, s->T1); } else { - gen_op_ld_v(s, ot, cpu_T1, s->A0); - tcg_gen_add_tl(s->T0, s->T0, cpu_T1); + gen_op_ld_v(s, ot, s->T1, s->A0); + tcg_gen_add_tl(s->T0, s->T0, s->T1); gen_op_st_v(s, ot, s->T0, s->A0); } - gen_op_mov_reg_v(ot, reg, cpu_T1); + gen_op_mov_reg_v(ot, reg, s->T1); } gen_op_update2_cc(s); set_cc_op(s, CC_OP_ADDB + ot); @@ -5653,16 +5652,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_v_reg(ot, s->T0, reg); - gen_op_mov_v_reg(ot, cpu_T1, rm); + gen_op_mov_v_reg(ot, s->T1, rm); gen_op_mov_reg_v(ot, rm, s->T0); - gen_op_mov_reg_v(ot, reg, cpu_T1); + gen_op_mov_reg_v(ot, reg, s->T1); } else { gen_lea_modrm(env, s, modrm); gen_op_mov_v_reg(ot, s->T0, reg); /* for xchg, lock is implicit */ - tcg_gen_atomic_xchg_tl(cpu_T1, s->A0, s->T0, + tcg_gen_atomic_xchg_tl(s->T1, 
s->A0, s->T0, s->mem_index, ot | MO_LE); - gen_op_mov_reg_v(ot, reg, cpu_T1); + gen_op_mov_reg_v(ot, reg, s->T1); } break; case 0xc4: /* les Gv */ @@ -5689,13 +5688,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, cpu_T1, s->A0); + gen_op_ld_v(s, ot, s->T1, s->A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ gen_op_ld_v(s, MO_16, s->T0, s->A0); gen_movl_seg_T0(s, op); /* then put the data */ - gen_op_mov_reg_v(ot, reg, cpu_T1); + gen_op_mov_reg_v(ot, reg, s->T1); if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); @@ -5774,7 +5773,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { opreg = rm; } - gen_op_mov_v_reg(ot, cpu_T1, reg); + gen_op_mov_v_reg(ot, s->T1, reg); if (shift) { TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); @@ -6387,8 +6386,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_io_start(); } tcg_gen_movi_i32(cpu_tmp2_i32, val); - gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); - gen_op_mov_reg_v(ot, R_EAX, cpu_T1); + gen_helper_in_func(ot, s->T1, cpu_tmp2_i32); + gen_op_mov_reg_v(ot, R_EAX, s->T1); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -6402,13 +6401,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_movi_tl(s->T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); - gen_op_mov_v_reg(ot, cpu_T1, R_EAX); + gen_op_mov_v_reg(ot, s->T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_movi_i32(cpu_tmp2_i32, val); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { @@ -6426,8 +6425,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_io_start(); } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); - gen_op_mov_reg_v(ot, R_EAX, cpu_T1); + gen_helper_in_func(ot, s->T1, cpu_tmp2_i32); + gen_op_mov_reg_v(ot, R_EAX, s->T1); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -6440,13 +6439,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); - gen_op_mov_v_reg(ot, cpu_T1, R_EAX); + gen_op_mov_v_reg(ot, s->T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); + tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { @@ -6552,7 +6551,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(s->T0, selector); - tcg_gen_movi_tl(cpu_T1, offset); + tcg_gen_movi_tl(s->T1, offset); } goto do_lcall; case 0xe9: /* jmp im */ @@ -6581,7 +6580,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(s->T0, selector); - tcg_gen_movi_tl(cpu_T1, offset); + tcg_gen_movi_tl(s->T1, offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ @@ -6753,7 +6752,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } /* load shift */ val = 
x86_ldub_code(env, s); - tcg_gen_movi_tl(cpu_T1, val); + tcg_gen_movi_tl(s->T1, val); if (op < 4) goto unknown_op; op -= 4; @@ -6775,12 +6774,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(MO_32, cpu_T1, reg); + gen_op_mov_v_reg(MO_32, s->T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ - gen_exts(ot, cpu_T1); - tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot); + gen_exts(ot, s->T1); + tcg_gen_sari_tl(cpu_tmp0, s->T1, 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), cpu_tmp0); gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); @@ -6791,9 +6790,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(ot, s->T0, rm); } bt_op: - tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1); + tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1); tcg_gen_movi_tl(cpu_tmp0, 1); - tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1); + tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, s->T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ @@ -6816,9 +6815,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, ot | MO_LE); break; } - tcg_gen_shr_tl(cpu_tmp4, s->T0, cpu_T1); + tcg_gen_shr_tl(cpu_tmp4, s->T0, s->T1); } else { - tcg_gen_shr_tl(cpu_tmp4, s->T0, cpu_T1); + tcg_gen_shr_tl(cpu_tmp4, s->T0, s->T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do. */ @@ -6914,8 +6913,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (b & 1) { /* For bsr, return the bit index of the first 1 bit, not the count of leading zeros. */ - tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1); - tcg_gen_clz_tl(s->T0, s->T0, cpu_T1); + tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1); + tcg_gen_clz_tl(s->T0, s->T0, s->T1); tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1); } else { tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]); @@ -7512,14 +7511,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, cpu_T1, s->A0); + gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); } tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base)); - tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit)); + tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit)); break; CASE_MODRM_MEM_OP(3): /* lidt */ @@ -7529,14 +7528,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, cpu_T1, s->A0); + gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); if (dflag == MO_16) { tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); } tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base)); - tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit)); + tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit)); break; CASE_MODRM_OP(4): /* smsw */ @@ -8471,7 +8470,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) #endif dc->T0 = tcg_temp_new(); - cpu_T1 = tcg_temp_new(); + dc->T1 = tcg_temp_new(); dc->A0 = 
tcg_temp_new(); cpu_tmp0 = tcg_temp_new(); From fbd80f02df3fe272ba0f4825df27b8459dafbc14 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:07:57 -0400 Subject: [PATCH 29/80] target/i386: move cpu_tmp0 to DisasContext Signed-off-by: Emilio G. Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 282 ++++++++++++++++++++-------------------- 1 file changed, 144 insertions(+), 138 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index bd27e65344..873231fb44 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -78,8 +78,8 @@ static TCGv cpu_regs[CPU_NB_REGS]; static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -/* local register indexes (only used inside old micro ops) */ -static TCGv cpu_tmp0, cpu_tmp4; + +static TCGv cpu_tmp4; static TCGv_ptr cpu_ptr0, cpu_ptr1; static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; static TCGv_i64 cpu_tmp1_i64; @@ -139,6 +139,9 @@ typedef struct DisasContext { TCGv T0; TCGv T1; + /* TCG local register indexes (only used inside old micro ops) */ + TCGv tmp0; + sigjmp_buf jmpbuf; } DisasContext; @@ -406,16 +409,17 @@ static inline void gen_op_jmp_v(TCGv dest) tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip)); } -static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val) +static inline +void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val) { - tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val); - gen_op_mov_reg_v(size, reg, cpu_tmp0); + tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); + gen_op_mov_reg_v(size, reg, s->tmp0); } static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg) { - tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], s->T0); - gen_op_mov_reg_v(size, reg, cpu_tmp0); + tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); + gen_op_mov_reg_v(size, reg, s->tmp0); } static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) @@ -437,10 +441,10 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) } } -static inline void gen_jmp_im(target_ulong pc) +static inline void gen_jmp_im(DisasContext *s, target_ulong pc) { - tcg_gen_movi_tl(cpu_tmp0, pc); - gen_op_jmp_v(cpu_tmp0); + tcg_gen_movi_tl(s->tmp0, pc); + gen_op_jmp_v(s->tmp0); } /* Compute SEG:REG into A0. 
SEG is selected from the override segment @@ -556,18 +560,20 @@ static void gen_exts(TCGMemOp ot, TCGv reg) gen_ext_tl(reg, reg, ot, true); } -static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1) +static inline +void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) { - tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); - gen_extu(size, cpu_tmp0); - tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1); + tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); + gen_extu(size, s->tmp0); + tcg_gen_brcondi_tl(TCG_COND_NE, s->tmp0, 0, label1); } -static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1) +static inline +void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) { - tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); - gen_extu(size, cpu_tmp0); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); + tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); + gen_extu(size, s->tmp0); + tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); } static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n) @@ -627,7 +633,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, } if(s->flags & HF_SVMI_MASK) { gen_update_cc_op(s); - gen_jmp_im(cur_eip); + gen_jmp_im(s, cur_eip); svm_flags |= (1 << (4 + ot)); next_eip = s->pc - s->cs_base; tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); @@ -743,9 +749,9 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) case CC_OP_SUBB ... CC_OP_SUBQ: /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_SUBB; - t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); + t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); /* If no temporary was used, be careful not to alias t1 and t0. */ - t0 = t1 == cpu_cc_src ? cpu_tmp0 : reg; + t0 = t1 == cpu_cc_src ? s->tmp0 : reg; tcg_gen_mov_tl(t0, s->cc_srcT); gen_extu(size, t0); goto add_sub; @@ -753,7 +759,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) case CC_OP_ADDB ... 
CC_OP_ADDQ: /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_ADDB; - t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); + t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); add_sub: return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0, @@ -905,7 +911,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) case JCC_BE: tcg_gen_mov_tl(cpu_tmp4, s->cc_srcT); gen_extu(size, cpu_tmp4); - t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); + t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; @@ -918,7 +924,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) fast_jcc_l: tcg_gen_mov_tl(cpu_tmp4, s->cc_srcT); gen_exts(size, cpu_tmp4); - t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true); + t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true); cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; @@ -955,7 +961,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) case JCC_L: gen_compute_eflags(s); if (reg == cpu_cc_src) { - reg = cpu_tmp0; + reg = s->tmp0; } tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ tcg_gen_xor_tl(reg, reg, cpu_cc_src); @@ -966,7 +972,7 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) case JCC_LE: gen_compute_eflags(s); if (reg == cpu_cc_src) { - reg = cpu_tmp0; + reg = s->tmp0; } tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ tcg_gen_xor_tl(reg, reg, cpu_cc_src); @@ -1061,7 +1067,7 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); - gen_op_jnz_ecx(s->aflag, l1); + gen_op_jnz_ecx(s, s->aflag, l1); gen_set_label(l2); gen_jmp_tb(s, next_eip, 1); gen_set_label(l1); @@ -1171,11 +1177,11 @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ gen_update_cc_op(s); \ l2 = gen_jz_ecx_string(s, next_eip); \ gen_ ## op(s, ot); \ - gen_op_add_reg_im(s->aflag, R_ECX, -1); \ + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ /* a loop would cause two single step exceptions if ECX = 1 \ before rep string_insn */ \ if (s->repz_opt) \ - gen_op_jz_ecx(s->aflag, l2); \ + gen_op_jz_ecx(s, s->aflag, l2); \ gen_jmp(s, cur_eip); \ } @@ -1189,11 +1195,11 @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ gen_update_cc_op(s); \ l2 = gen_jz_ecx_string(s, next_eip); \ gen_ ## op(s, ot); \ - gen_op_add_reg_im(s->aflag, R_ECX, -1); \ + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ gen_update_cc_op(s); \ gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ if (s->repz_opt) \ - gen_op_jz_ecx(s->aflag, l2); \ + gen_op_jz_ecx(s, s->aflag, l2); \ gen_jmp(s, cur_eip); \ } @@ -1447,27 +1453,27 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, } tcg_gen_andi_tl(s->T1, s->T1, mask); - tcg_gen_subi_tl(cpu_tmp0, s->T1, 1); + tcg_gen_subi_tl(s->tmp0, s->T1, 1); if (is_right) { if (is_arith) { gen_exts(ot, s->T0); - tcg_gen_sar_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0); tcg_gen_sar_tl(s->T0, s->T0, s->T1); } else { gen_extu(ot, s->T0); - tcg_gen_shr_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); tcg_gen_shr_tl(s->T0, s->T0, s->T1); } } else { - tcg_gen_shl_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); tcg_gen_shl_tl(s->T0, s->T0, s->T1); } /* store */ gen_op_st_rm_T0_A0(s, ot, op1); - 
gen_shift_flags(s, ot, s->T0, cpu_tmp0, s->T1, is_right); + gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); } static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, @@ -1640,9 +1646,9 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, shift = mask + 1 - shift; } gen_extu(ot, s->T0); - tcg_gen_shli_tl(cpu_tmp0, s->T0, shift); + tcg_gen_shli_tl(s->tmp0, s->T0, shift); tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift); - tcg_gen_or_tl(s->T0, s->T0, cpu_tmp0); + tcg_gen_or_tl(s->T0, s->T0, s->tmp0); break; } } @@ -1751,9 +1757,9 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A portion by constructing it as a 32-bit value. */ if (is_right) { - tcg_gen_deposit_tl(cpu_tmp0, s->T0, s->T1, 16, 16); + tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16); tcg_gen_mov_tl(s->T1, s->T0); - tcg_gen_mov_tl(s->T0, cpu_tmp0); + tcg_gen_mov_tl(s->T0, s->tmp0); } else { tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16); } @@ -1761,35 +1767,35 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, #ifdef TARGET_X86_64 case MO_32: /* Concatenate the two 32-bit values and use a 64-bit shift. */ - tcg_gen_subi_tl(cpu_tmp0, count, 1); + tcg_gen_subi_tl(s->tmp0, count, 1); if (is_right) { tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1); - tcg_gen_shr_i64(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0); tcg_gen_shr_i64(s->T0, s->T0, count); } else { tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0); - tcg_gen_shl_i64(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0); tcg_gen_shl_i64(s->T0, s->T0, count); - tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32); + tcg_gen_shri_i64(s->tmp0, s->tmp0, 32); tcg_gen_shri_i64(s->T0, s->T0, 32); } break; #endif default: - tcg_gen_subi_tl(cpu_tmp0, count, 1); + tcg_gen_subi_tl(s->tmp0, count, 1); if (is_right) { - tcg_gen_shr_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count); tcg_gen_shr_tl(s->T0, s->T0, count); tcg_gen_shl_tl(s->T1, s->T1, cpu_tmp4); } else { - tcg_gen_shl_tl(cpu_tmp0, s->T0, cpu_tmp0); + tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); if (ot == MO_16) { /* Only needed if count > 16, for Intel behaviour. 
*/ tcg_gen_subfi_tl(cpu_tmp4, 33, count); tcg_gen_shr_tl(cpu_tmp4, s->T1, cpu_tmp4); - tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4); + tcg_gen_or_tl(s->tmp0, s->tmp0, cpu_tmp4); } tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count); @@ -1806,7 +1812,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, /* store */ gen_op_st_rm_T0_A0(s, ot, op1); - gen_shift_flags(s, ot, s->T0, cpu_tmp0, count, is_right); + gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); tcg_temp_free(count); } @@ -2196,13 +2202,13 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) if (use_goto_tb(s, pc)) { /* jump to same page: we can use a direct jump */ tcg_gen_goto_tb(tb_num); - gen_jmp_im(eip); + gen_jmp_im(s, eip); tcg_gen_exit_tb(s->base.tb, tb_num); s->base.is_jmp = DISAS_NORETURN; } else { /* jump to another page */ - gen_jmp_im(eip); - gen_jr(s, cpu_tmp0); + gen_jmp_im(s, eip); + gen_jr(s, s->tmp0); } } @@ -2224,11 +2230,11 @@ static inline void gen_jcc(DisasContext *s, int b, l2 = gen_new_label(); gen_jcc1(s, b, l1); - gen_jmp_im(next_eip); + gen_jmp_im(s, next_eip); tcg_gen_br(l2); gen_set_label(l1); - gen_jmp_im(val); + gen_jmp_im(s, val); gen_set_label(l2); gen_eob(s); } @@ -2312,7 +2318,7 @@ gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, if (likely(!(s->flags & HF_SVMI_MASK))) return; gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type), tcg_const_i64(param)); } @@ -2325,7 +2331,7 @@ gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) static inline void gen_stack_update(DisasContext *s, int addend) { - gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend); + gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend); } /* Generate a push. It depends on ss32, addseg and dflag. */ @@ -2427,11 +2433,11 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) for (i = 1; i < level; ++i) { tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); - gen_op_ld_v(s, d_ot, cpu_tmp0, s->A0); + gen_op_ld_v(s, d_ot, s->tmp0, s->A0); tcg_gen_subi_tl(s->A0, s->T1, size * i); gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); - gen_op_st_v(s, d_ot, cpu_tmp0, s->A0); + gen_op_st_v(s, d_ot, s->tmp0, s->A0); } /* Push the current FrameTemp as the last level. 
*/ @@ -2465,7 +2471,7 @@ static void gen_leave(DisasContext *s) static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) { gen_update_cc_op(s); - gen_jmp_im(cur_eip); + gen_jmp_im(s, cur_eip); gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno)); s->base.is_jmp = DISAS_NORETURN; } @@ -2502,7 +2508,7 @@ static void gen_interrupt(DisasContext *s, int intno, target_ulong cur_eip, target_ulong next_eip) { gen_update_cc_op(s); - gen_jmp_im(cur_eip); + gen_jmp_im(s, cur_eip); gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno), tcg_const_i32(next_eip - cur_eip)); s->base.is_jmp = DISAS_NORETURN; @@ -2511,7 +2517,7 @@ static void gen_interrupt(DisasContext *s, int intno, static void gen_debug(DisasContext *s, target_ulong cur_eip) { gen_update_cc_op(s); - gen_jmp_im(cur_eip); + gen_jmp_im(s, cur_eip); gen_helper_debug(cpu_env); s->base.is_jmp = DISAS_NORETURN; } @@ -2621,7 +2627,7 @@ static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) if (s->jmp_opt) { gen_goto_tb(s, tb_num, eip); } else { - gen_jmp_im(eip); + gen_jmp_im(s, eip); gen_eob(s); } } @@ -2648,8 +2654,8 @@ static inline void gen_ldo_env_A0(DisasContext *s, int offset) int mem_index = s->mem_index; tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, mem_index, MO_LEQ); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_addi_tl(cpu_tmp0, s->A0, 8); - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); + tcg_gen_addi_tl(s->tmp0, s->A0, 8); + tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->tmp0, mem_index, MO_LEQ); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); } @@ -2658,9 +2664,9 @@ static inline void gen_sto_env_A0(DisasContext *s, int offset) int mem_index = s->mem_index; tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, mem_index, MO_LEQ); - tcg_gen_addi_tl(cpu_tmp0, s->A0, 8); + tcg_gen_addi_tl(s->tmp0, s->A0, 8); tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); + tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->tmp0, mem_index, MO_LEQ); } static inline void gen_op_movo(int d_offset, int s_offset) @@ -3713,9 +3719,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, offsetof(ZMMReg, ZMM_L(0))); break; case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ - tcg_gen_qemu_ld_tl(cpu_tmp0, s->A0, + tcg_gen_qemu_ld_tl(s->tmp0, s->A0, s->mem_index, MO_LEUW); - tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset + + tcg_gen_st16_tl(s->tmp0, cpu_env, op2_offset + offsetof(ZMMReg, ZMM_W(0))); break; case 0x2a: /* movntqda */ @@ -3999,7 +4005,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { gen_compute_eflags(s); } - carry_in = cpu_tmp0; + carry_in = s->tmp0; tcg_gen_extract_tl(carry_in, cpu_cc_src, ctz32(b == 0x1f6 ? 
CC_C : CC_O), 1); } @@ -4902,8 +4908,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mul_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); - tcg_gen_ext8s_tl(cpu_tmp0, s->T0); - tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0); + tcg_gen_ext8s_tl(s->tmp0, s->T0); + tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); set_cc_op(s, CC_OP_MULB); break; case MO_16: @@ -4914,8 +4920,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mul_tl(s->T0, s->T0, s->T1); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); - tcg_gen_ext16s_tl(cpu_tmp0, s->T0); - tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0); + tcg_gen_ext16s_tl(s->tmp0, s->T0); + tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); tcg_gen_shri_tl(s->T0, s->T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, s->T0); set_cc_op(s, CC_OP_MULW); @@ -5215,8 +5221,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(s->T0, s->T0, s->T1); tcg_gen_mov_tl(cpu_cc_dst, s->T0); - tcg_gen_ext16s_tl(cpu_tmp0, s->T0); - tcg_gen_sub_tl(cpu_cc_src, s->T0, cpu_tmp0); + tcg_gen_ext16s_tl(s->tmp0, s->T0); + tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); gen_op_mov_reg_v(ot, reg, s->T0); break; } @@ -5423,7 +5429,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_pop_update(s, ot); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->base.is_jmp) { - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); @@ -5438,7 +5444,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_movl_seg_T0(s, (b >> 3) & 7); gen_pop_update(s, ot); if (s->base.is_jmp) { - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; @@ -5489,7 +5495,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_movl_seg_T0(s, reg); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. 
*/ if (s->base.is_jmp) { - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); @@ -5696,7 +5702,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* then put the data */ gen_op_mov_reg_v(ot, reg, s->T1); if (s->base.is_jmp) { - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; @@ -6478,7 +6484,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) do_lret: if (s->pe && !s->vm86) { gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(val)); } else { @@ -6691,7 +6697,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; @@ -6779,9 +6785,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ gen_exts(ot, s->T1); - tcg_gen_sari_tl(cpu_tmp0, s->T1, 3 + ot); - tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); - tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), cpu_tmp0); + tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot); + tcg_gen_shli_tl(s->tmp0, s->tmp0, ot); + tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), s->tmp0); gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, s->T0, s->A0); @@ -6791,8 +6797,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } bt_op: tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1); - tcg_gen_movi_tl(cpu_tmp0, 1); - tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, s->T1); + tcg_gen_movi_tl(s->tmp0, 1); + tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ @@ -6801,17 +6807,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, ot, s->T0, s->A0); break; case 1: /* bts */ - tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, cpu_tmp0, + tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0, s->mem_index, ot | MO_LE); break; case 2: /* btr */ - tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); - tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, cpu_tmp0, + tcg_gen_not_tl(s->tmp0, s->tmp0); + tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0, s->mem_index, ot | MO_LE); break; default: case 3: /* btc */ - tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, cpu_tmp0, + tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0, s->mem_index, ot | MO_LE); break; } @@ -6823,14 +6829,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* Data already loaded; nothing to do. 
*/ break; case 1: /* bts */ - tcg_gen_or_tl(s->T0, s->T0, cpu_tmp0); + tcg_gen_or_tl(s->T0, s->T0, s->tmp0); break; case 2: /* btr */ - tcg_gen_andc_tl(s->T0, s->T0, cpu_tmp0); + tcg_gen_andc_tl(s->T0, s->T0, s->tmp0); break; default: case 3: /* btc */ - tcg_gen_xor_tl(s->T0, s->T0, cpu_tmp0); + tcg_gen_xor_tl(s->T0, s->T0, s->tmp0); break; } if (op != 0) { @@ -6983,7 +6989,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (prefixes & PREFIX_REPZ) { gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } @@ -7011,7 +7017,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); break; #ifdef WANT_ICEBP @@ -7045,7 +7051,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) { gen_helper_sti(cpu_env); /* interruptions are enabled only the first insn after sti */ - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob_inhibit_irq(s, true); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); @@ -7113,26 +7119,26 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(b) { case 0: /* loopnz */ case 1: /* loopz */ - gen_op_add_reg_im(s->aflag, R_ECX, -1); - gen_op_jz_ecx(s->aflag, l3); + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + gen_op_jz_ecx(s, s->aflag, l3); gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); break; case 2: /* loop */ - gen_op_add_reg_im(s->aflag, R_ECX, -1); - gen_op_jnz_ecx(s->aflag, l1); + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + gen_op_jnz_ecx(s, s->aflag, l1); break; default: case 3: /* jcxz */ - gen_op_jz_ecx(s->aflag, l1); + gen_op_jz_ecx(s, s->aflag, l1); break; } gen_set_label(l3); - gen_jmp_im(next_eip); + gen_jmp_im(s, next_eip); tcg_gen_br(l2); gen_set_label(l1); - gen_jmp_im(tval); + gen_jmp_im(s, tval); gen_set_label(l2); gen_eob(s); } @@ -7143,7 +7149,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { @@ -7153,7 +7159,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0x131: /* rdtsc */ gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } @@ -7165,7 +7171,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 0x133: /* rdpmc */ gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_rdpmc(cpu_env); break; case 0x134: /* sysenter */ @@ -7194,7 +7200,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); /* TF handling for the syscall insn is different. The TF bit is checked after the syscall insn completes. 
This allows #DB to not be @@ -7220,7 +7226,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) #endif case 0x1a2: /* cpuid */ gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ @@ -7228,7 +7234,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } @@ -7320,7 +7326,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); @@ -7332,7 +7338,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; @@ -7343,7 +7349,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_helper_clac(cpu_env); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7353,7 +7359,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_helper_stac(cpu_env); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7396,7 +7402,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); /* End TB because translation flags may change. 
*/ - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7409,7 +7415,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), tcg_const_i32(s->pc - pc_start)); tcg_gen_exit_tb(NULL, 0); @@ -7421,7 +7427,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmmcall(cpu_env); break; @@ -7434,7 +7440,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); break; @@ -7447,7 +7453,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); break; @@ -7463,7 +7469,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_update_cc_op(s); gen_helper_stgi(cpu_env); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7476,7 +7482,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_clgi(cpu_env); break; @@ -7487,7 +7493,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_skinit(cpu_env); break; @@ -7500,7 +7506,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1)); break; @@ -7574,7 +7580,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_helper_lmsw(cpu_env, s->T0); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7584,10 +7590,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); gen_lea_modrm(env, s, modrm); gen_helper_invlpg(cpu_env, s->A0); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -7613,7 +7619,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } @@ -7688,11 +7694,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) a0 = NULL; } gen_op_mov_v_reg(ot, t1, reg); - tcg_gen_andi_tl(cpu_tmp0, t0, 3); + tcg_gen_andi_tl(s->tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); label1 = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); + tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_or_tl(t0, t0, t1); tcg_gen_movi_tl(t2, CC_Z); @@ -7729,9 +7735,9 @@ static target_ulong 
disas_insn(DisasContext *s, CPUState *cpu) } else { gen_helper_lsl(t0, cpu_env, s->T0); } - tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); + tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); + tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); @@ -7981,7 +7987,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 4: case 8: gen_update_cc_op(s); - gen_jmp_im(pc_start - s->cs_base); + gen_jmp_im(s, pc_start - s->cs_base); if (b & 2) { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); @@ -7992,7 +7998,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); } - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { @@ -8035,7 +8041,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(ot, s->T0, rm); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_set_dr(cpu_env, cpu_tmp2_i32, s->T0); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); @@ -8052,7 +8058,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } break; @@ -8149,7 +8155,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. */ gen_update_cc_op(s); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); break; @@ -8279,7 +8285,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (!(s->flags & HF_SMM_MASK)) goto illegal_op; gen_update_cc_op(s); - gen_jmp_im(s->pc - s->cs_base); + gen_jmp_im(s, s->pc - s->cs_base); gen_helper_rsm(cpu_env); gen_eob(s); break; @@ -8473,7 +8479,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->T1 = tcg_temp_new(); dc->A0 = tcg_temp_new(); - cpu_tmp0 = tcg_temp_new(); + dc->tmp0 = tcg_temp_new(); cpu_tmp1_i64 = tcg_temp_new_i64(); cpu_tmp2_i32 = tcg_temp_new_i32(); cpu_tmp3_i32 = tcg_temp_new_i32(); @@ -8550,7 +8556,7 @@ static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); if (dc->base.is_jmp == DISAS_TOO_MANY) { - gen_jmp_im(dc->base.pc_next - dc->cs_base); + gen_jmp_im(dc, dc->base.pc_next - dc->cs_base); gen_eob(dc); } } From 5022f28f1e4033eb369b744ad61b96d086beca1b Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:10:21 -0400 Subject: [PATCH 30/80] target/i386: move cpu_tmp4 to DisasContext Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 78 ++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index 873231fb44..0ad6ffc4af 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -79,7 +79,6 @@ static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -static TCGv cpu_tmp4; static TCGv_ptr cpu_ptr0, cpu_ptr1; static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; static TCGv_i64 cpu_tmp1_i64; @@ -141,6 +140,7 @@ typedef struct DisasContext { /* TCG local register indexes (only used inside old micro ops) */ TCGv tmp0; + TCGv tmp4; sigjmp_buf jmpbuf; } DisasContext; @@ -909,10 +909,10 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) size = s->cc_op - CC_OP_SUBB; switch (jcc_op) { case JCC_BE: - tcg_gen_mov_tl(cpu_tmp4, s->cc_srcT); - gen_extu(size, cpu_tmp4); + tcg_gen_mov_tl(s->tmp4, s->cc_srcT); + gen_extu(size, s->tmp4); t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); - cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4, + cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; @@ -922,10 +922,10 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) case JCC_LE: cond = TCG_COND_LE; fast_jcc_l: - tcg_gen_mov_tl(cpu_tmp4, s->cc_srcT); - gen_exts(size, cpu_tmp4); + tcg_gen_mov_tl(s->tmp4, s->cc_srcT); + gen_exts(size, s->tmp4); t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true); - cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4, + cc = (CCPrepare) { .cond = cond, .reg = s->tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; @@ -1277,32 +1277,32 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) } switch(op) { case OP_ADCL: - gen_compute_eflags_c(s1, cpu_tmp4); + gen_compute_eflags_c(s1, s1->tmp4); if (s1->prefix & PREFIX_LOCK) { - tcg_gen_add_tl(s1->T0, cpu_tmp4, s1->T1); + tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1); tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { tcg_gen_add_tl(s1->T0, s1->T0, s1->T1); - tcg_gen_add_tl(s1->T0, s1->T0, cpu_tmp4); + tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update3_cc(s1, cpu_tmp4); + gen_op_update3_cc(s1, s1->tmp4); set_cc_op(s1, CC_OP_ADCB + ot); break; case OP_SBBL: - gen_compute_eflags_c(s1, cpu_tmp4); + gen_compute_eflags_c(s1, s1->tmp4); if (s1->prefix & PREFIX_LOCK) { - tcg_gen_add_tl(s1->T0, s1->T1, cpu_tmp4); + tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4); tcg_gen_neg_tl(s1->T0, s1->T0); tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); } else { tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); - tcg_gen_sub_tl(s1->T0, s1->T0, cpu_tmp4); + tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4); gen_op_st_rm_T0_A0(s1, ot, d); } - gen_op_update3_cc(s1, cpu_tmp4); + gen_op_update3_cc(s1, s1->tmp4); set_cc_op(s1, CC_OP_SBBB + ot); break; case OP_ADDL: @@ -1492,15 +1492,15 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, if (is_right) { if (is_arith) { gen_exts(ot, s->T0); - tcg_gen_sari_tl(cpu_tmp4, s->T0, op2 - 1); + tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1); tcg_gen_sari_tl(s->T0, s->T0, op2); } else { gen_extu(ot, s->T0); - tcg_gen_shri_tl(cpu_tmp4, s->T0, op2 - 1); + tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1); tcg_gen_shri_tl(s->T0, s->T0, op2); } } else { - tcg_gen_shli_tl(cpu_tmp4, s->T0, op2 - 1); + tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 
1); tcg_gen_shli_tl(s->T0, s->T0, op2); } } @@ -1510,7 +1510,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, /* update eflags if non zero shift */ if (op2 != 0) { - tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); + tcg_gen_mov_tl(cpu_cc_src, s->tmp4); tcg_gen_mov_tl(cpu_cc_dst, s->T0); set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); } @@ -1786,25 +1786,25 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, if (is_right) { tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); - tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count); + tcg_gen_subfi_tl(s->tmp4, mask + 1, count); tcg_gen_shr_tl(s->T0, s->T0, count); - tcg_gen_shl_tl(s->T1, s->T1, cpu_tmp4); + tcg_gen_shl_tl(s->T1, s->T1, s->tmp4); } else { tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); if (ot == MO_16) { /* Only needed if count > 16, for Intel behaviour. */ - tcg_gen_subfi_tl(cpu_tmp4, 33, count); - tcg_gen_shr_tl(cpu_tmp4, s->T1, cpu_tmp4); - tcg_gen_or_tl(s->tmp0, s->tmp0, cpu_tmp4); + tcg_gen_subfi_tl(s->tmp4, 33, count); + tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4); + tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4); } - tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count); + tcg_gen_subfi_tl(s->tmp4, mask + 1, count); tcg_gen_shl_tl(s->T0, s->T0, count); - tcg_gen_shr_tl(s->T1, s->T1, cpu_tmp4); + tcg_gen_shr_tl(s->T1, s->T1, s->tmp4); } - tcg_gen_movi_tl(cpu_tmp4, 0); - tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, cpu_tmp4, - cpu_tmp4, s->T1); + tcg_gen_movi_tl(s->tmp4, 0); + tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4, + s->tmp4, s->T1); tcg_gen_or_tl(s->T0, s->T0, s->T1); break; } @@ -2346,7 +2346,7 @@ static void gen_push_v(DisasContext *s, TCGv val) if (!CODE64(s)) { if (s->addseg) { - new_esp = cpu_tmp4; + new_esp = s->tmp4; tcg_gen_mov_tl(new_esp, s->A0); } gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); @@ -5068,8 +5068,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); } - tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); - gen_jr(s, cpu_tmp4); + tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip)); + gen_jr(s, s->tmp4); break; case 4: /* jmp Ev */ if (dflag == MO_16) { @@ -5092,8 +5092,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_movl_seg_T0_vm(s, R_CS); gen_op_jmp_v(s->T1); } - tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); - gen_jr(s, cpu_tmp4); + tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip)); + gen_jr(s, s->tmp4); break; case 6: /* push Ev */ gen_push_v(s, s->T0); @@ -6821,9 +6821,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, ot | MO_LE); break; } - tcg_gen_shr_tl(cpu_tmp4, s->T0, s->T1); + tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); } else { - tcg_gen_shr_tl(cpu_tmp4, s->T0, s->T1); + tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do. */ @@ -6867,13 +6867,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) We can get that same Z value (and the new C value) by leaving CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. */ - tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); + tcg_gen_mov_tl(cpu_cc_src, s->tmp4); set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); break; default: /* Otherwise, generate EFLAGS and replace the C bit. 
*/ gen_compute_eflags(s); - tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4, + tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4, ctz32(CC_C), 1); break; } @@ -8483,7 +8483,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) cpu_tmp1_i64 = tcg_temp_new_i64(); cpu_tmp2_i32 = tcg_temp_new_i32(); cpu_tmp3_i32 = tcg_temp_new_i32(); - cpu_tmp4 = tcg_temp_new(); + dc->tmp4 = tcg_temp_new(); cpu_ptr0 = tcg_temp_new_ptr(); cpu_ptr1 = tcg_temp_new_ptr(); dc->cc_srcT = tcg_temp_local_new(); From 2ee2646491a293a92d1c85e90e12419a8c199ed0 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:11:35 -0400 Subject: [PATCH 31/80] target/i386: move cpu_ptr0 to DisasContext Signed-off-by: Emilio G. Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 101 +++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 49 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index 0ad6ffc4af..9531dafebe 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -79,7 +79,7 @@ static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -static TCGv_ptr cpu_ptr0, cpu_ptr1; +static TCGv_ptr cpu_ptr1; static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; static TCGv_i64 cpu_tmp1_i64; @@ -141,6 +141,7 @@ typedef struct DisasContext { /* TCG local register indexes (only used inside old micro ops) */ TCGv tmp0; TCGv tmp4; + TCGv_ptr ptr0; sigjmp_buf jmpbuf; } DisasContext; @@ -3147,27 +3148,27 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, + tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32); + gen_helper_movl_mm_T0_mmx(s->ptr0, cpu_tmp2_i32); } break; case 0x16e: /* movd xmm, ea */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, + tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); - gen_helper_movq_mm_T0_xmm(cpu_ptr0, s->T0); + gen_helper_movq_mm_T0_xmm(s->ptr0, s->T0); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, + tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32); + gen_helper_movl_mm_T0_xmm(s->ptr0, cpu_tmp2_i32); } break; case 0x6f: /* movq mm, ea */ @@ -3312,14 +3313,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto illegal_op; field_length = x86_ldub_code(env, s) & 0x3F; bit_index = x86_ldub_code(env, s) & 0x3F; - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, + tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); if (b1 == 1) - gen_helper_extrq_i(cpu_env, cpu_ptr0, + gen_helper_extrq_i(cpu_env, s->ptr0, tcg_const_i32(bit_index), tcg_const_i32(field_length)); else - gen_helper_insertq_i(cpu_env, cpu_ptr0, + gen_helper_insertq_i(cpu_env, s->ptr0, tcg_const_i32(bit_index), tcg_const_i32(field_length)); } @@ -3471,22 +3472,22 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); - 
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); + sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); break; case 0x050: /* movmskps */ rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, + tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0); + gen_helper_movmskps(cpu_tmp2_i32, cpu_env, s->ptr0); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); break; case 0x150: /* movmskpd */ rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, + tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0); + gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, s->ptr0); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); break; case 0x02a: /* cvtpi2ps */ @@ -3501,15 +3502,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); switch(b >> 8) { case 0x0: - gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1); + gen_helper_cvtpi2ps(cpu_env, s->ptr0, cpu_ptr1); break; default: case 0x1: - gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1); + gen_helper_cvtpi2pd(cpu_env, s->ptr0, cpu_ptr1); break; } break; @@ -3518,15 +3519,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); if (ot == MO_32) { SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32); + sse_fn_epi(cpu_env, s->ptr0, cpu_tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; - sse_fn_epl(cpu_env, cpu_ptr0, s->T0); + sse_fn_epl(cpu_env, s->ptr0, s->T0); #else goto illegal_op; #endif @@ -3546,20 +3547,20 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); switch(b) { case 0x02c: - gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1); + gen_helper_cvttps2pi(cpu_env, s->ptr0, cpu_ptr1); break; case 0x12c: - gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1); + gen_helper_cvttpd2pi(cpu_env, s->ptr0, cpu_ptr1); break; case 0x02d: - gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1); + gen_helper_cvtps2pi(cpu_env, s->ptr0, cpu_ptr1); break; case 0x12d: - gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1); + gen_helper_cvtpd2pi(cpu_env, s->ptr0, cpu_ptr1); break; } break; @@ -3582,17 +3583,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset); if (ot == MO_32) { SSEFunc_i_ep sse_fn_i_ep = sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; - sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0); + sse_fn_i_ep(cpu_tmp2_i32, cpu_env, s->ptr0); tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_l_ep sse_fn_l_ep = sse_op_table3bq[((b >> 7) 
& 2) | (b & 1)]; - sse_fn_l_ep(s->T0, cpu_env, cpu_ptr0); + sse_fn_l_ep(s->T0, cpu_env, s->ptr0); #else goto illegal_op; #endif @@ -3665,12 +3666,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto illegal_op; if (b1) { rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0); + tcg_gen_addi_ptr(s->ptr0, cpu_env, + offsetof(CPUX86State, xmm_regs[rm])); + gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, s->ptr0); } else { rm = (modrm & 7); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); - gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0); + tcg_gen_addi_ptr(s->ptr0, cpu_env, + offsetof(CPUX86State, fpregs[rm].mmx)); + gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, s->ptr0); } reg = ((modrm >> 3) & 7) | rex_r; tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); @@ -3745,9 +3748,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto unknown_op; } - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); + sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); if (b == 0x17) { set_cc_op(s, CC_OP_EFLAGS); @@ -4294,9 +4297,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } } - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); + sse_fn_eppi(cpu_env, s->ptr0, cpu_ptr1, tcg_const_i32(val)); break; case 0x33a: @@ -4417,18 +4420,18 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { goto illegal_op; } - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); + sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); break; case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ val = x86_ldub_code(env, s); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); /* XXX: introduce a new table? */ sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; - sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); + sse_fn_ppi(s->ptr0, cpu_ptr1, tcg_const_i32(val)); break; case 0xc2: /* compare insns */ @@ -4437,9 +4440,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto unknown_op; sse_fn_epp = sse_op_table4[val][b1]; - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); + sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); break; case 0xf7: /* maskmov : we must prepare A0 */ @@ -4449,16 +4452,16 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); /* XXX: introduce a new table? 
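The op tables currently store every helper as SSEFunc_0_epp, so the cast below recovers the maskmov helper's real signature, which also takes the A0 address. 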
*/ sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; - sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, s->A0); + sse_fn_eppt(cpu_env, s->ptr0, cpu_ptr1, s->A0); break; default: - tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); + sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); break; } if (b == 0x2e || b == 0x2f) { @@ -8484,7 +8487,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) cpu_tmp2_i32 = tcg_temp_new_i32(); cpu_tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); - cpu_ptr0 = tcg_temp_new_ptr(); + dc->ptr0 = tcg_temp_new_ptr(); cpu_ptr1 = tcg_temp_new_ptr(); dc->cc_srcT = tcg_temp_local_new(); } From 6387e8303ffb26cfb40b0f93372f1519229b4d2c Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:14:06 -0400 Subject: [PATCH 32/80] target/i386: move cpu_ptr1 to DisasContext Signed-off-by: Emilio G. Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 52 ++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index 9531dafebe..c51f61ca2c 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -79,7 +79,6 @@ static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -static TCGv_ptr cpu_ptr1; static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; static TCGv_i64 cpu_tmp1_i64; @@ -142,6 +141,7 @@ typedef struct DisasContext { TCGv tmp0; TCGv tmp4; TCGv_ptr ptr0; + TCGv_ptr ptr1; sigjmp_buf jmpbuf; } DisasContext; @@ -3473,8 +3473,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); - sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op1_offset); + sse_fn_epp(cpu_env, s->ptr0, s->ptr1); break; case 0x050: /* movmskps */ rm = (modrm & 7) | REX_B(s); @@ -3503,14 +3503,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); switch(b >> 8) { case 0x0: - gen_helper_cvtpi2ps(cpu_env, s->ptr0, cpu_ptr1); + gen_helper_cvtpi2ps(cpu_env, s->ptr0, s->ptr1); break; default: case 0x1: - gen_helper_cvtpi2pd(cpu_env, s->ptr0, cpu_ptr1); + gen_helper_cvtpi2pd(cpu_env, s->ptr0, s->ptr1); break; } break; @@ -3548,19 +3548,19 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); switch(b) { case 0x02c: - gen_helper_cvttps2pi(cpu_env, s->ptr0, cpu_ptr1); + gen_helper_cvttps2pi(cpu_env, s->ptr0, s->ptr1); break; case 0x12c: - gen_helper_cvttpd2pi(cpu_env, s->ptr0, cpu_ptr1); + gen_helper_cvttpd2pi(cpu_env, s->ptr0, s->ptr1); break; case 0x02d: - gen_helper_cvtps2pi(cpu_env, s->ptr0, cpu_ptr1); + gen_helper_cvtps2pi(cpu_env, s->ptr0, s->ptr1); break; case 0x12d: - gen_helper_cvtpd2pi(cpu_env, s->ptr0, cpu_ptr1); + gen_helper_cvtpd2pi(cpu_env, s->ptr0, s->ptr1); break; } break; @@ -3749,8 +3749,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, 
int b, } tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); + sse_fn_epp(cpu_env, s->ptr0, s->ptr1); if (b == 0x17) { set_cc_op(s, CC_OP_EFLAGS); @@ -4298,8 +4298,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_eppi(cpu_env, s->ptr0, cpu_ptr1, tcg_const_i32(val)); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); + sse_fn_eppi(cpu_env, s->ptr0, s->ptr1, tcg_const_i32(val)); break; case 0x33a: @@ -4421,17 +4421,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto illegal_op; } tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); + sse_fn_epp(cpu_env, s->ptr0, s->ptr1); break; case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ val = x86_ldub_code(env, s); tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); /* XXX: introduce a new table? */ sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; - sse_fn_ppi(s->ptr0, cpu_ptr1, tcg_const_i32(val)); + sse_fn_ppi(s->ptr0, s->ptr1, tcg_const_i32(val)); break; case 0xc2: /* compare insns */ @@ -4441,8 +4441,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, sse_fn_epp = sse_op_table4[val][b1]; tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); + sse_fn_epp(cpu_env, s->ptr0, s->ptr1); break; case 0xf7: /* maskmov : we must prepare A0 */ @@ -4453,15 +4453,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_add_A0_ds_seg(s); tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); /* XXX: introduce a new table? */ sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; - sse_fn_eppt(cpu_env, s->ptr0, cpu_ptr1, s->A0); + sse_fn_eppt(cpu_env, s->ptr0, s->ptr1, s->A0); break; default: tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(cpu_env, s->ptr0, cpu_ptr1); + tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset); + sse_fn_epp(cpu_env, s->ptr0, s->ptr1); break; } if (b == 0x2e || b == 0x2f) { @@ -8488,7 +8488,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) cpu_tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); dc->ptr0 = tcg_temp_new_ptr(); - cpu_ptr1 = tcg_temp_new_ptr(); + dc->ptr1 = tcg_temp_new_ptr(); dc->cc_srcT = tcg_temp_local_new(); } From 6bd48f6f206b6f32a5bbeebc3ae6886d4f587981 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:17:18 -0400 Subject: [PATCH 33/80] target/i386: move cpu_tmp2_i32 to DisasContext Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 347 ++++++++++++++++++++-------------------- 1 file changed, 174 insertions(+), 173 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index c51f61ca2c..ec68f7dba1 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -79,7 +79,7 @@ static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; +static TCGv_i32 cpu_tmp3_i32; static TCGv_i64 cpu_tmp1_i64; #include "exec/gen-icount.h" @@ -142,6 +142,7 @@ typedef struct DisasContext { TCGv tmp4; TCGv_ptr ptr0; TCGv_ptr ptr1; + TCGv_i32 tmp2_i32; sigjmp_buf jmpbuf; } DisasContext; @@ -617,16 +618,16 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, target_ulong next_eip; if (s->pe && (s->cpl > s->iopl || s->vm86)) { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); switch (ot) { case MO_8: - gen_helper_check_iob(cpu_env, cpu_tmp2_i32); + gen_helper_check_iob(cpu_env, s->tmp2_i32); break; case MO_16: - gen_helper_check_iow(cpu_env, cpu_tmp2_i32); + gen_helper_check_iow(cpu_env, s->tmp2_i32); break; case MO_32: - gen_helper_check_iol(cpu_env, cpu_tmp2_i32); + gen_helper_check_iol(cpu_env, s->tmp2_i32); break; default: tcg_abort(); @@ -637,8 +638,8 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, gen_jmp_im(s, cur_eip); svm_flags |= (1 << (4 + ot)); next_eip = s->pc - s->cs_base; - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32, + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_svm_check_io(cpu_env, s->tmp2_i32, tcg_const_i32(svm_flags), tcg_const_i32(next_eip - cur_eip)); } @@ -1136,13 +1137,13 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) case of page fault. */ tcg_gen_movi_tl(s->T0, 0); gen_op_st_v(s, ot, s->T0, s->A0); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); - tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); - gen_helper_in_func(ot, s->T0, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); + tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); + gen_helper_in_func(ot, s->T0, s->tmp2_i32); gen_op_st_v(s, ot, s->T0, s->A0); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_EDI); - gen_bpt_io(s, cpu_tmp2_i32, ot); + gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); } @@ -1156,13 +1157,13 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); - tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); + tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T0); - gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); + gen_helper_out_func(ot, s->tmp2_i32, cpu_tmp3_i32); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); - gen_bpt_io(s, cpu_tmp2_i32, ot); + gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); } @@ -1421,7 +1422,7 @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, tcg_temp_free(z_tl); /* Get the two potential CC_OP values into temporaries. */ - tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); + tcg_gen_movi_i32(s->tmp2_i32, (is_right ? 
CC_OP_SARB : CC_OP_SHLB) + ot); if (s->cc_op == CC_OP_DYNAMIC) { oldop = cpu_cc_op; } else { @@ -1433,7 +1434,7 @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, z32 = tcg_const_i32(0); s32 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(s32, count); - tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop); + tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop); tcg_temp_free_i32(z32); tcg_temp_free_i32(s32); @@ -1544,14 +1545,14 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) do_long: #ifdef TARGET_X86_64 case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); if (is_right) { - tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); } else { - tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); } - tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); break; #endif default: @@ -1591,10 +1592,10 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) t0 = tcg_const_i32(0); t1 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t1, s->T1); - tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX); + tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX); tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS); tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, - cpu_tmp2_i32, cpu_tmp3_i32); + s->tmp2_i32, cpu_tmp3_i32); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); @@ -1620,13 +1621,13 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, switch (ot) { #ifdef TARGET_X86_64 case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); if (is_right) { - tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2); + tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2); } else { - tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2); + tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2); } - tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); break; #endif default: @@ -2111,8 +2112,8 @@ static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64); } tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv); - tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64); - gen_helper_bndck(cpu_env, cpu_tmp2_i32); + tcg_gen_extrl_i64_i32(s->tmp2_i32, cpu_tmp1_i64); + gen_helper_bndck(cpu_env, s->tmp2_i32); } /* used for LEA and MOV AX, mem */ @@ -2289,8 +2290,8 @@ static inline void gen_op_movl_seg_T0_vm(DisasContext *s, int seg_reg) static void gen_movl_seg_T0(DisasContext *s, int seg_reg) { if (s->pe && !s->vm86) { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. 
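Both values are sampled into DisasContext when the TB is translated, so code generated from stale copies would be wrong. 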
For R_SS, translation must always stop as a special handling must be done to disable hardware @@ -2684,10 +2685,10 @@ static inline void gen_op_movq(int d_offset, int s_offset) tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); } -static inline void gen_op_movl(int d_offset, int s_offset) +static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset) { - tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset); - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset); + tcg_gen_ld_i32(s->tmp2_i32, cpu_env, s_offset); + tcg_gen_st_i32(s->tmp2_i32, cpu_env, d_offset); } static inline void gen_op_movq_env_0(int d_offset) @@ -3150,8 +3151,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_movl_mm_T0_mmx(s->ptr0, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_movl_mm_T0_mmx(s->ptr0, s->tmp2_i32); } break; case 0x16e: /* movd xmm, ea */ @@ -3167,8 +3168,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_movl_mm_T0_xmm(s->ptr0, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_movl_mm_T0_xmm(s->ptr0, s->tmp2_i32); } break; case 0x6f: /* movq mm, ea */ @@ -3213,7 +3214,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); } break; @@ -3252,14 +3253,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2))); } - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2))); break; case 0x312: /* movddup */ @@ -3294,14 +3295,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1))); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3))); } - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1))); - gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)), + gen_op_movl(s, offsetof(CPUX86State, 
xmm_regs[reg].ZMM_L(2)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3))); break; case 0x178: @@ -3398,7 +3399,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_op_st_v(s, MO_32, s->T0, s->A0); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)), + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); } break; @@ -3480,15 +3481,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskps(cpu_tmp2_i32, cpu_env, s->ptr0); - tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); + gen_helper_movmskps(s->tmp2_i32, cpu_env, s->ptr0); + tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); break; case 0x150: /* movmskpd */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, s->ptr0); - tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); + gen_helper_movmskpd(s->tmp2_i32, cpu_env, s->ptr0); + tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); break; case 0x02a: /* cvtpi2ps */ case 0x12a: /* cvtpi2pd */ @@ -3522,8 +3523,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset); if (ot == MO_32) { SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - sse_fn_epi(cpu_env, s->ptr0, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + sse_fn_epi(cpu_env, s->ptr0, s->tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; @@ -3587,8 +3588,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (ot == MO_32) { SSEFunc_i_ep sse_fn_i_ep = sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; - sse_fn_i_ep(cpu_tmp2_i32, cpu_env, s->ptr0); - tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); + sse_fn_i_ep(s->tmp2_i32, cpu_env, s->ptr0); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_l_ep sse_fn_l_ep = @@ -3668,15 +3669,15 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State, xmm_regs[rm])); - gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, s->ptr0); + gen_helper_pmovmskb_xmm(s->tmp2_i32, cpu_env, s->ptr0); } else { rm = (modrm & 7); tcg_gen_addi_ptr(s->ptr0, cpu_env, offsetof(CPUX86State, fpregs[rm].mmx)); - gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, s->ptr0); + gen_helper_pmovmskb_mmx(s->tmp2_i32, cpu_env, s->ptr0); } reg = ((modrm >> 3) & 7) | rex_r; - tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); + tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); break; case 0x138: @@ -3716,9 +3717,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset + + tcg_gen_st_i32(s->tmp2_i32, cpu_env, op2_offset + offsetof(ZMMReg, ZMM_L(0))); break; case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ @@ -3780,9 +3781,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, ot = MO_64; } - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[reg]); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - 
gen_helper_crc32(s->T0, cpu_tmp2_i32, + gen_helper_crc32(s->T0, s->tmp2_i32, s->T0, tcg_const_i32(8 << ot)); ot = mo_64_32(s->dflag); @@ -3910,11 +3911,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); switch (ot) { default: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]); - tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32); + tcg_gen_mulu2_i32(s->tmp2_i32, cpu_tmp3_i32, + s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32); break; #ifdef TARGET_X86_64 @@ -4162,13 +4163,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; case 0x16: if (ot == MO_32) { /* pextrd */ - tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, + tcg_gen_ld_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); if (mod == 3) { - tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32); + tcg_gen_extu_i32_tl(cpu_regs[rm], s->tmp2_i32); } else { - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); } } else { /* pextrq */ @@ -4209,14 +4210,14 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; case 0x21: /* insertps */ if (mod == 3) { - tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, + tcg_gen_ld_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State,xmm_regs[rm] .ZMM_L((val >> 6) & 3))); } else { - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); } - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, + tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State,xmm_regs[reg] .ZMM_L((val >> 4) & 3))); if ((val >> 0) & 1) @@ -4239,12 +4240,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, case 0x22: if (ot == MO_32) { /* pinsrd */ if (mod == 3) { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[rm]); } else { - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); } - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, + tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); } else { /* pinsrq */ @@ -4321,9 +4322,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if (ot == MO_64) { tcg_gen_rotri_tl(s->T0, s->T0, b & 63); } else { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31); - tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); } gen_op_mov_reg_v(ot, reg, s->T0); break; @@ -4880,11 +4881,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; default: case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); - tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); + tcg_gen_mulu2_i32(s->tmp2_i32, cpu_tmp3_i32, + s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); @@ -4931,16 +4932,16 @@ 
static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; default: case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); - tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); + tcg_gen_muls2_i32(s->tmp2_i32, cpu_tmp3_i32, + s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); - tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); + tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); - tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); + tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 @@ -5061,13 +5062,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, MO_16, s->T0, s->A0); do_lcall: if (s->pe && !s->vm86) { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, s->T1, + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1, tcg_const_i32(dflag - 1), tcg_const_tl(s->pc - s->cs_base)); } else { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, s->T1, + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->T1, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); } @@ -5088,8 +5089,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, MO_16, s->T0, s->A0); do_ljmp: if (s->pe && !s->vm86) { - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, s->T1, + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1, tcg_const_tl(s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(s, R_CS); @@ -5208,15 +5209,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; #endif case MO_32: - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); - tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); - tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); + tcg_gen_muls2_i32(s->tmp2_i32, cpu_tmp3_i32, + s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); + tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); - tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); + tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); break; default: tcg_gen_ext16s_tl(s->T0, s->T0); @@ -5820,14 +5821,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch(op >> 4) { case 0: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); - gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); + gen_helper_flds_FT0(cpu_env, s->tmp2_i32); break; case 1: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); - gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); + gen_helper_fildl_FT0(cpu_env, s->tmp2_i32); break; case 
2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, @@ -5836,9 +5837,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 3: default: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LESW); - gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); + gen_helper_fildl_FT0(cpu_env, s->tmp2_i32); break; } @@ -5859,14 +5860,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0: switch(op >> 4) { case 0: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); - gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); + gen_helper_flds_ST0(cpu_env, s->tmp2_i32); break; case 1: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); - gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); + gen_helper_fildl_ST0(cpu_env, s->tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, @@ -5875,9 +5876,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 3: default: - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LESW); - gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); + gen_helper_fildl_ST0(cpu_env, s->tmp2_i32); break; } break; @@ -5885,8 +5886,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: - gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 2: @@ -5896,8 +5897,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 3: default: - gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fistt_ST0(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; } @@ -5906,13 +5907,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) default: switch(op >> 4) { case 0: - gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fsts_ST0(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 1: - gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fistl_ST0(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); break; case 2: @@ -5922,8 +5923,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; case 3: default: - gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fist_ST0(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; } @@ -5936,16 +5937,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_fldenv(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x0d: /* fldcw mem */ - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); - gen_helper_fldcw(cpu_env, cpu_tmp2_i32); + gen_helper_fldcw(cpu_env, s->tmp2_i32); break; case 0x0e: /* fnstenv mem */ gen_helper_fstenv(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x0f: /* fnstcw mem */ - gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fnstcw(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 
s->mem_index, MO_LEUW); break; case 0x1d: /* fldt mem */ @@ -5962,8 +5963,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_fsave(cpu_env, s->A0, tcg_const_i32(dflag - 1)); break; case 0x2f: /* fnstsw mem */ - gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(cpu_tmp2_i32, s->A0, + gen_helper_fnstsw(s->tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUW); break; case 0x3c: /* fbld */ @@ -6241,8 +6242,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x3c: /* df/4 */ switch(rm) { case 0: - gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); - tcg_gen_extu_i32_tl(s->T0, cpu_tmp2_i32); + gen_helper_fnstsw(s->tmp2_i32, cpu_env); + tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); gen_op_mov_reg_v(MO_16, R_EAX, s->T0); break; default: @@ -6394,10 +6395,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_movi_i32(cpu_tmp2_i32, val); - gen_helper_in_func(ot, s->T1, cpu_tmp2_i32); + tcg_gen_movi_i32(s->tmp2_i32, val); + gen_helper_in_func(ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, s->T1); - gen_bpt_io(s, cpu_tmp2_i32, ot); + gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); @@ -6415,10 +6416,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_movi_i32(cpu_tmp2_i32, val); + tcg_gen_movi_i32(s->tmp2_i32, val); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); - gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); - gen_bpt_io(s, cpu_tmp2_i32, ot); + gen_helper_out_func(ot, s->tmp2_i32, cpu_tmp3_i32); + gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); @@ -6433,10 +6434,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_in_func(ot, s->T1, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_in_func(ot, s->T1, s->tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, s->T1); - gen_bpt_io(s, cpu_tmp2_i32, ot); + gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); @@ -6453,10 +6454,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); - gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); - gen_bpt_io(s, cpu_tmp2_i32, ot); + gen_helper_out_func(ot, s->tmp2_i32, cpu_tmp3_i32); + gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); @@ -6734,12 +6735,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xfc: /* cld */ - tcg_gen_movi_i32(cpu_tmp2_i32, 1); - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); + tcg_gen_movi_i32(s->tmp2_i32, 1); + tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ - tcg_gen_movi_i32(cpu_tmp2_i32, -1); - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); + tcg_gen_movi_i32(s->tmp2_i32, -1); + tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, 
df)); break; /************************/ @@ -7071,11 +7072,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; gen_op_mov_v_reg(ot, s->T0, reg); gen_lea_modrm(env, s, modrm); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); if (ot == MO_16) { - gen_helper_boundw(cpu_env, s->A0, cpu_tmp2_i32); + gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32); } else { - gen_helper_boundl(cpu_env, s->A0, cpu_tmp2_i32); + gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32); } break; case 0x1c8 ... 0x1cf: /* bswap reg */ @@ -7264,8 +7265,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_lldt(cpu_env, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_lldt(cpu_env, s->tmp2_i32); } break; case 1: /* str */ @@ -7285,8 +7286,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, s->T0); - gen_helper_ltr(cpu_env, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); + gen_helper_ltr(cpu_env, s->tmp2_i32); } break; case 4: /* verr */ @@ -7385,8 +7386,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); - gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); + gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, s->tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; @@ -7402,8 +7403,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); - gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); + gen_helper_xsetbv(cpu_env, s->tmp2_i32, cpu_tmp1_i64); /* End TB because translation flags may change. 
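An XCR0 write can toggle state components, and with them hflags that were baked into this TB, so execution must resume from a fresh translation. 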
*/ gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); @@ -7562,8 +7563,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (prefixes & PREFIX_LOCK) { goto illegal_op; } - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); - gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); + gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, s->tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xef: /* wrpkru */ @@ -7572,8 +7573,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); - tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); - gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); + tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); + gen_helper_wrpkru(cpu_env, s->tmp2_i32, cpu_tmp1_i64); break; CASE_MODRM_OP(6): /* lmsw */ if (s->cpl != 0) { @@ -8042,14 +8043,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_v_reg(ot, s->T0, rm); - tcg_gen_movi_i32(cpu_tmp2_i32, reg); - gen_helper_set_dr(cpu_env, cpu_tmp2_i32, s->T0); + tcg_gen_movi_i32(s->tmp2_i32, reg); + gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0); gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); - tcg_gen_movi_i32(cpu_tmp2_i32, reg); - gen_helper_get_dr(s->T0, cpu_env, cpu_tmp2_i32); + tcg_gen_movi_i32(s->tmp2_i32, reg); + gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32); gen_op_mov_reg_v(ot, rm, s->T0); } } @@ -8116,8 +8117,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } gen_lea_modrm(env, s, modrm); - tcg_gen_qemu_ld_i32(cpu_tmp2_i32, s->A0, s->mem_index, MO_LEUL); - gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32); + tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); + gen_helper_ldmxcsr(cpu_env, s->tmp2_i32); break; CASE_MODRM_MEM_OP(3): /* stmxcsr */ @@ -8216,8 +8217,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) TCGv base, treg, src, dst; /* Preserve hflags bits by testing CR4 at runtime. */ - tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK); - gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32); + tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK); + gen_helper_cr4_testbit(cpu_env, s->tmp2_i32); base = cpu_seg_base[modrm & 8 ? R_GS : R_FS]; treg = cpu_regs[(modrm & 7) | REX_B(s)]; @@ -8484,7 +8485,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->tmp0 = tcg_temp_new(); cpu_tmp1_i64 = tcg_temp_new_i64(); - cpu_tmp2_i32 = tcg_temp_new_i32(); + dc->tmp2_i32 = tcg_temp_new_i32(); cpu_tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); dc->ptr0 = tcg_temp_new_ptr(); From 4f82446de695f080ed148a0e47fc141e928665af Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:17:56 -0400 Subject: [PATCH 34/80] target/i386: move cpu_tmp3_i32 to DisasContext Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 64 ++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index ec68f7dba1..cd880cc2a8 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -79,7 +79,6 @@ static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -static TCGv_i32 cpu_tmp3_i32; static TCGv_i64 cpu_tmp1_i64; #include "exec/gen-icount.h" @@ -143,6 +142,7 @@ typedef struct DisasContext { TCGv_ptr ptr0; TCGv_ptr ptr1; TCGv_i32 tmp2_i32; + TCGv_i32 tmp3_i32; sigjmp_buf jmpbuf; } DisasContext; @@ -1159,8 +1159,8 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T0); - gen_helper_out_func(ot, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); + gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_op_movl_T0_Dshift(s, ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); gen_bpt_io(s, s->tmp2_i32, ot); @@ -1426,8 +1426,8 @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, if (s->cc_op == CC_OP_DYNAMIC) { oldop = cpu_cc_op; } else { - tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op); - oldop = cpu_tmp3_i32; + tcg_gen_movi_i32(s->tmp3_i32, s->cc_op); + oldop = s->tmp3_i32; } /* Conditionally store the CC_OP value. */ @@ -1546,11 +1546,11 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) #ifdef TARGET_X86_64 case MO_32: tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); if (is_right) { - tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); } else { - tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); } tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); break; @@ -1593,9 +1593,9 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) t1 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t1, s->T1); tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX); - tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS); + tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS); tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, - s->tmp2_i32, cpu_tmp3_i32); + s->tmp2_i32, s->tmp3_i32); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); @@ -3912,11 +3912,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, switch (ot) { default: tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]); - tcg_gen_mulu2_i32(s->tmp2_i32, cpu_tmp3_i32, - s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EDX]); + tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); - tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp3_i32); break; #ifdef TARGET_X86_64 case MO_64: @@ -4882,11 +4882,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) default: case MO_32: tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); - tcg_gen_mulu2_i32(s->tmp2_i32, cpu_tmp3_i32, - s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]); + tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, 
s->tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); - tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULL); @@ -4933,14 +4933,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) default: case MO_32: tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); - tcg_gen_muls2_i32(s->tmp2_i32, cpu_tmp3_i32, - s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]); + tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); - tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); + tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32); tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); - tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); set_cc_op(s, CC_OP_MULL); break; @@ -5210,13 +5210,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) #endif case MO_32: tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); - tcg_gen_muls2_i32(s->tmp2_i32, cpu_tmp3_i32, - s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); + tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); - tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); break; default: @@ -6417,8 +6417,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_io_start(); } tcg_gen_movi_i32(s->tmp2_i32, val); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); - gen_helper_out_func(ot, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); + gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -6455,8 +6455,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_io_start(); } tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(cpu_tmp3_i32, s->T1); - gen_helper_out_func(ot, s->tmp2_i32, cpu_tmp3_i32); + tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); + gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -8486,7 +8486,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->tmp0 = tcg_temp_new(); cpu_tmp1_i64 = tcg_temp_new_i64(); dc->tmp2_i32 = tcg_temp_new_i32(); - cpu_tmp3_i32 = tcg_temp_new_i32(); + dc->tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); dc->ptr0 = tcg_temp_new_ptr(); dc->ptr1 = tcg_temp_new_ptr(); From 776678b2961848a80387509c433dc04b0f761592 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 14:22:31 -0400 Subject: [PATCH 35/80] target/i386: move cpu_tmp1_i64 to DisasContext Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 160 ++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 80 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index cd880cc2a8..61a98ef872 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -79,8 +79,6 @@ static TCGv cpu_seg_base[6]; static TCGv_i64 cpu_bndl[4]; static TCGv_i64 cpu_bndu[4]; -static TCGv_i64 cpu_tmp1_i64; - #include "exec/gen-icount.h" #ifdef TARGET_X86_64 @@ -143,6 +141,7 @@ typedef struct DisasContext { TCGv_ptr ptr1; TCGv_i32 tmp2_i32; TCGv_i32 tmp3_i32; + TCGv_i64 tmp1_i64; sigjmp_buf jmpbuf; } DisasContext; @@ -2107,12 +2106,12 @@ static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, { TCGv ea = gen_lea_modrm_1(s, gen_lea_modrm_0(env, s, modrm)); - tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea); + tcg_gen_extu_tl_i64(s->tmp1_i64, ea); if (!CODE64(s)) { - tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64); + tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64); } - tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv); - tcg_gen_extrl_i64_i32(s->tmp2_i32, cpu_tmp1_i64); + tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv); + tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64); gen_helper_bndck(cpu_env, s->tmp2_i32); } @@ -2641,48 +2640,48 @@ static void gen_jmp(DisasContext *s, target_ulong eip) static inline void gen_ldq_env_A0(DisasContext *s, int offset) { - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset); } static inline void gen_stq_env_A0(DisasContext *s, int offset) { - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); } static inline void gen_ldo_env_A0(DisasContext *s, int offset) { int mem_index = s->mem_index; - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, mem_index, MO_LEQ); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); tcg_gen_addi_tl(s->tmp0, s->A0, 8); - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->tmp0, mem_index, MO_LEQ); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); } static inline void gen_sto_env_A0(DisasContext *s, int offset) { int mem_index = s->mem_index; - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, mem_index, MO_LEQ); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ); tcg_gen_addi_tl(s->tmp0, s->A0, 8); - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->tmp0, mem_index, MO_LEQ); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); } -static inline void gen_op_movo(int d_offset, int s_offset) +static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset) { - tcg_gen_ld_i64(cpu_tmp1_i64, 
cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0))); - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1))); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1))); } -static inline void gen_op_movq(int d_offset, int s_offset) +static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset) { - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset); } static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset) @@ -2691,10 +2690,10 @@ static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset) tcg_gen_st_i32(s->tmp2_i32, cpu_env, d_offset); } -static inline void gen_op_movq_env_0(int d_offset) +static inline void gen_op_movq_env_0(DisasContext *s, int d_offset) { - tcg_gen_movi_i64(cpu_tmp1_i64, 0); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); + tcg_gen_movi_i64(s->tmp1_i64, 0); + tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset); } typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); @@ -3178,9 +3177,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); } else { rm = (modrm & 7); - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); } break; @@ -3195,7 +3194,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]), + gen_op_movo(s, offsetof(CPUX86State, xmm_regs[reg]), offsetof(CPUX86State,xmm_regs[rm])); } break; @@ -3230,7 +3229,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } break; @@ -3243,7 +3242,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } else { /* movhlps */ rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1))); } break; @@ -3270,10 +3269,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); break; case 0x016: /* movhps */ @@ -3285,7 +3284,7 @@ static void gen_sse(CPUX86State *env, 
DisasContext *s, int b, } else { /* movlhps */ rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } break; @@ -3361,10 +3360,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); } - gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1))); + gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); break; case 0x7f: /* movq ea, mm */ if (mod != 3) { @@ -3372,7 +3371,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); } else { rm = (modrm & 7); - gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx), + gen_op_movq(s, offsetof(CPUX86State, fpregs[rm].mmx), offsetof(CPUX86State,fpregs[reg].mmx)); } break; @@ -3387,7 +3386,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]), + gen_op_movo(s, offsetof(CPUX86State, xmm_regs[rm]), offsetof(CPUX86State,xmm_regs[reg])); } break; @@ -3410,7 +3409,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); } break; @@ -3643,22 +3642,23 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, xmm_regs[reg].ZMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); - gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1))); + gen_op_movq_env_0(s, + offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(1))); } break; case 0x2d6: /* movq2dq */ gen_helper_enter_mmx(cpu_env); rm = (modrm & 7); - gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)), + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), offsetof(CPUX86State,fpregs[rm].mmx)); - gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1))); + gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); break; case 0x3d6: /* movdq2q */ gen_helper_enter_mmx(cpu_env); rm = (modrm & 7) | REX_B(s); - gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx), + gen_op_movq(s, offsetof(CPUX86State, fpregs[reg & 7].mmx), offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); break; case 0xd7: /* pmovmskb */ @@ -4174,13 +4174,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } } else { /* pextrq */ #ifdef TARGET_X86_64 - tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, + tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(val & 1))); if (mod == 3) { - tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64); + tcg_gen_mov_i64(cpu_regs[rm], s->tmp1_i64); } else { - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); } #else @@ -4251,12 +4251,12 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } else { /* pinsrq */ #ifdef TARGET_X86_64 if (mod == 3) { - gen_op_mov_v_reg(ot, 
cpu_tmp1_i64, rm); + gen_op_mov_v_reg(ot, s->tmp1_i64, rm); } else { - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); } - tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, + tcg_gen_st_i64(s->tmp1_i64, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(val & 1))); #else @@ -5831,9 +5831,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_fildl_FT0(cpu_env, s->tmp2_i32); break; case 2: - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); - gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); + gen_helper_fldl_FT0(cpu_env, s->tmp1_i64); break; case 3: default: @@ -5870,9 +5870,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_fildl_ST0(cpu_env, s->tmp2_i32); break; case 2: - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); - gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); + gen_helper_fldl_ST0(cpu_env, s->tmp1_i64); break; case 3: default: @@ -5891,8 +5891,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, MO_LEUL); break; case 2: - gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, + gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); break; case 3: @@ -5917,8 +5917,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) s->mem_index, MO_LEUL); break; case 2: - gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, + gen_helper_fstl_ST0(s->tmp1_i64, cpu_env); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); break; case 3: @@ -5975,12 +5975,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_fpop(cpu_env); break; case 0x3d: /* fildll */ - tcg_gen_qemu_ld_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); - gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); + tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + gen_helper_fildll_ST0(cpu_env, s->tmp1_i64); break; case 0x3f: /* fistpll */ - gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(cpu_tmp1_i64, s->A0, s->mem_index, MO_LEQ); + gen_helper_fistll_ST0(s->tmp1_i64, cpu_env); + tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); gen_helper_fpop(cpu_env); break; default: @@ -7387,8 +7387,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); - gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, s->tmp2_i32); - tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); + gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32); + tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); break; case 0xd1: /* xsetbv */ @@ -7401,10 +7401,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } - tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], + tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); - gen_helper_xsetbv(cpu_env, s->tmp2_i32, cpu_tmp1_i64); + gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64); /* End TB because translation flags may change. 
*/ gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); @@ -7564,17 +7564,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); - gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, s->tmp2_i32); - tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); + gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32); + tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); break; case 0xef: /* wrpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } - tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], + tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); - gen_helper_wrpkru(cpu_env, s->tmp2_i32, cpu_tmp1_i64); + gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64); break; CASE_MODRM_OP(6): /* lmsw */ if (s->cpl != 0) { @@ -8141,9 +8141,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_lea_modrm(env, s, modrm); - tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], + tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); - gen_helper_xsave(cpu_env, s->A0, cpu_tmp1_i64); + gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64); break; CASE_MODRM_MEM_OP(5): /* xrstor */ @@ -8153,9 +8153,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_lea_modrm(env, s, modrm); - tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], + tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); - gen_helper_xrstor(cpu_env, s->A0, cpu_tmp1_i64); + gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64); /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. */ gen_update_cc_op(s); @@ -8181,9 +8181,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } gen_lea_modrm(env, s, modrm); - tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], + tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); - gen_helper_xsaveopt(cpu_env, s->A0, cpu_tmp1_i64); + gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64); } break; @@ -8484,7 +8484,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->A0 = tcg_temp_new(); dc->tmp0 = tcg_temp_new(); - cpu_tmp1_i64 = tcg_temp_new_i64(); + dc->tmp1_i64 = tcg_temp_new_i64(); dc->tmp2_i32 = tcg_temp_new_i32(); dc->tmp3_i32 = tcg_temp_new_i32(); dc->tmp4 = tcg_temp_new(); From 1dbe15ef57abdf7b6a26c8e638abf6413a4b9d0c Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 11 Sep 2018 16:07:54 -0400 Subject: [PATCH 36/80] target/i386: move x86_64_hregs to DisasContext And convert it to a bool to use an existing hole in the struct. Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- target/i386/translate.c | 307 ++++++++++++++++++++-------------------- 1 file changed, 154 insertions(+), 153 deletions(-) diff --git a/target/i386/translate.c b/target/i386/translate.c index 61a98ef872..b8222dc4ba 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -81,10 +81,6 @@ static TCGv_i64 cpu_bndu[4]; #include "exec/gen-icount.h" -#ifdef TARGET_X86_64 -static int x86_64_hregs; -#endif - typedef struct DisasContext { DisasContextBase base; @@ -109,6 +105,9 @@ typedef struct DisasContext { int ss32; /* 32 bit stack segment */ CCOp cc_op; /* current CC operation */ bool cc_op_dirty; +#ifdef TARGET_X86_64 + bool x86_64_hregs; +#endif int addseg; /* non zero if either DS/ES/SS have a non zero base */ int f_st; /* currently unused */ int vm86; /* vm86 mode */ @@ -307,13 +306,13 @@ static void gen_update_cc_op(DisasContext *s) * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return * true for this special case, false otherwise. */ -static inline bool byte_reg_is_xH(int reg) +static inline bool byte_reg_is_xH(DisasContext *s, int reg) { if (reg < 4) { return false; } #ifdef TARGET_X86_64 - if (reg >= 8 || x86_64_hregs) { + if (reg >= 8 || s->x86_64_hregs) { return false; } #endif @@ -360,11 +359,11 @@ static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; } -static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0) +static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) { switch(ot) { case MO_8: - if (!byte_reg_is_xH(reg)) { + if (!byte_reg_is_xH(s, reg)) { tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8); } else { tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8); @@ -388,9 +387,10 @@ static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0) } } -static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg) +static inline +void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg) { - if (ot == MO_8 && byte_reg_is_xH(reg)) { + if (ot == MO_8 && byte_reg_is_xH(s, reg)) { tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); } else { tcg_gen_mov_tl(t0, cpu_regs[reg]); @@ -414,13 +414,13 @@ static inline void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val) { tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); - gen_op_mov_reg_v(size, reg, s->tmp0); + gen_op_mov_reg_v(s, size, reg, s->tmp0); } static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg) { tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); - gen_op_mov_reg_v(size, reg, s->tmp0); + gen_op_mov_reg_v(s, size, reg, s->tmp0); } static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) @@ -438,7 +438,7 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) if (d == OR_TMP0) { gen_op_st_v(s, idx, s->T0, s->A0); } else { - gen_op_mov_reg_v(idx, d, s->T0); + gen_op_mov_reg_v(s, idx, d, s->T0); } } @@ -1077,7 +1077,7 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) static inline void gen_stos(DisasContext *s, TCGMemOp ot) { - gen_op_mov_v_reg(MO_32, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); gen_op_movl_T0_Dshift(s, ot); @@ -1088,7 +1088,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); - gen_op_mov_reg_v(ot, R_EAX, s->T0); + gen_op_mov_reg_v(s, ot, R_EAX, s->T0); gen_op_movl_T0_Dshift(s, 
ot); gen_op_add_reg_T0(s, s->aflag, R_ESI); } @@ -1272,7 +1272,7 @@ static void gen_helper_fp_arith_STN_ST0(int op, int opreg) static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) { if (d != OR_TMP0) { - gen_op_mov_v_reg(ot, s1->T0, d); + gen_op_mov_v_reg(s1, ot, s1->T0, d); } else if (!(s1->prefix & PREFIX_LOCK)) { gen_op_ld_v(s1, ot, s1->T0, s1->A0); } @@ -1383,7 +1383,7 @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) s1->mem_index, ot | MO_LE); } else { if (d != OR_TMP0) { - gen_op_mov_v_reg(ot, s1->T0, d); + gen_op_mov_v_reg(s1, ot, s1->T0, d); } else { gen_op_ld_v(s1, ot, s1->T0, s1->A0); } @@ -1450,7 +1450,7 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, s->T0, op1); + gen_op_mov_v_reg(s, ot, s->T0, op1); } tcg_gen_andi_tl(s->T1, s->T1, mask); @@ -1486,7 +1486,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, if (op1 == OR_TMP0) gen_op_ld_v(s, ot, s->T0, s->A0); else - gen_op_mov_v_reg(ot, s->T0, op1); + gen_op_mov_v_reg(s, ot, s->T0, op1); op2 &= mask; if (op2 != 0) { @@ -1526,7 +1526,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, s->T0, op1); + gen_op_mov_v_reg(s, ot, s->T0, op1); } tcg_gen_andi_tl(s->T1, s->T1, mask); @@ -1612,7 +1612,7 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, s->T0, op1); + gen_op_mov_v_reg(s, ot, s->T0, op1); } op2 &= mask; @@ -1690,7 +1690,7 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, if (op1 == OR_TMP0) gen_op_ld_v(s, ot, s->T0, s->A0); else - gen_op_mov_v_reg(ot, s->T0, op1); + gen_op_mov_v_reg(s, ot, s->T0, op1); if (is_right) { switch (ot) { @@ -1746,7 +1746,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, if (op1 == OR_TMP0) { gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, s->T0, op1); + gen_op_mov_v_reg(s, ot, s->T0, op1); } count = tcg_temp_new(); @@ -1820,7 +1820,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) { if (s != OR_TMP1) - gen_op_mov_v_reg(ot, s1->T1, s); + gen_op_mov_v_reg(s1, ot, s1->T1, s); switch(op) { case OP_ROL: gen_rot_rm_T1(s1, ot, d, 0); @@ -2133,23 +2133,23 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, if (mod == 3) { if (is_store) { if (reg != OR_TMP0) - gen_op_mov_v_reg(ot, s->T0, reg); - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_v_reg(s, ot, s->T0, reg); + gen_op_mov_reg_v(s, ot, rm, s->T0); } else { - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); if (reg != OR_TMP0) - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); } } else { gen_lea_modrm(env, s, modrm); if (is_store) { if (reg != OR_TMP0) - gen_op_mov_v_reg(ot, s->T0, reg); + gen_op_mov_v_reg(s, ot, s->T0, reg); gen_op_st_v(s, ot, s->T0, s->A0); } else { gen_op_ld_v(s, ot, s->T0, s->A0); if (reg != OR_TMP0) - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); } } } @@ -2260,7 +2260,7 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, s->T0, cpu_regs[reg]); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, 
ot, reg, s->T0); if (cc.mask != -1) { tcg_temp_free(cc.reg); @@ -2354,7 +2354,7 @@ static void gen_push_v(DisasContext *s, TCGv val) } gen_op_st_v(s, d_ot, val, s->A0); - gen_op_mov_reg_v(a_ot, R_ESP, new_esp); + gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp); } /* two step pop is necessary for precise exceptions */ @@ -2409,7 +2409,7 @@ static void gen_popa(DisasContext *s) tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size); gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); - gen_op_mov_reg_v(d_ot, 7 - i, s->T0); + gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0); } gen_stack_update(s, 8 * size); @@ -2448,11 +2448,11 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) } /* Copy the FrameTemp value to EBP. */ - gen_op_mov_reg_v(a_ot, R_EBP, s->T1); + gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); /* Compute the final value of ESP. */ tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level); - gen_op_mov_reg_v(a_ot, R_ESP, s->T1); + gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); } static void gen_leave(DisasContext *s) @@ -2465,8 +2465,8 @@ static void gen_leave(DisasContext *s) tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot); - gen_op_mov_reg_v(d_ot, R_EBP, s->T0); - gen_op_mov_reg_v(a_ot, R_ESP, s->T1); + gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); + gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); } static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) @@ -3598,7 +3598,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto illegal_op; #endif } - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0xc4: /* pinsrw */ case 0x1c4: @@ -3633,7 +3633,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); } reg = ((modrm >> 3) & 7) | rex_r; - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x1d6: /* movq ea, xmm */ if (mod != 3) { @@ -3787,7 +3787,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, s->T0, tcg_const_i32(8 << ot)); ot = mo_64_32(s->dflag); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x1f0: /* crc32 or movbe */ @@ -3814,7 +3814,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, if ((b & 1) == 0) { tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); } else { tcg_gen_qemu_st_tl(cpu_regs[reg], s->A0, s->mem_index, ot | MO_BE); @@ -3830,7 +3830,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); tcg_gen_andc_tl(s->T0, s->T0, cpu_regs[s->vex_v]); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -3868,7 +3868,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_subi_tl(s->T1, s->T1, 1); tcg_gen_and_tl(s->T0, s->T0, s->T1); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); } @@ -3896,7 +3896,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_movi_tl(s->A0, -1); tcg_gen_shl_tl(s->A0, s->A0, s->T1); tcg_gen_andc_tl(s->T0, s->T0, s->A0); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); gen_op_update1_cc(s); set_cc_op(s, CC_OP_BMILGB + ot); break; @@ -4071,7 +4071,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } 
tcg_gen_shr_tl(s->T0, s->T0, s->T1); } - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x0f3: @@ -4104,7 +4104,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto unknown_op; } tcg_gen_mov_tl(cpu_cc_dst, s->T0); - gen_op_mov_reg_v(ot, s->vex_v, s->T0); + gen_op_mov_reg_v(s, ot, s->vex_v, s->T0); set_cc_op(s, CC_OP_BMILGB + ot); break; @@ -4145,7 +4145,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_B(val & 15))); if (mod == 3) { - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } else { tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, MO_UB); @@ -4155,7 +4155,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_W(val & 7))); if (mod == 3) { - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } else { tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, MO_LEUW); @@ -4192,7 +4192,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(val & 3))); if (mod == 3) { - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } else { tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, MO_LEUL); @@ -4200,7 +4200,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, break; case 0x20: /* pinsrb */ if (mod == 3) { - gen_op_mov_v_reg(MO_32, s->T0, rm); + gen_op_mov_v_reg(s, MO_32, s->T0, rm); } else { tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, MO_UB); @@ -4251,7 +4251,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, } else { /* pinsrq */ #ifdef TARGET_X86_64 if (mod == 3) { - gen_op_mov_v_reg(ot, s->tmp1_i64, rm); + gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm); } else { tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); @@ -4326,7 +4326,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31); tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); } - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; default: @@ -4489,7 +4489,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; - x86_64_hregs = 0; + s->x86_64_hregs = false; #endif s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; @@ -4548,7 +4548,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; REX_B(s) = (b & 0x1) << 3; - x86_64_hregs = 1; /* select uniform byte register addressing */ + /* select uniform byte register addressing */ + s->x86_64_hregs = true; goto next_byte; } break; @@ -4576,7 +4577,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; } #ifdef TARGET_X86_64 - if (x86_64_hregs) { + if (s->x86_64_hregs) { goto illegal_op; } #endif @@ -4681,12 +4682,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* xor reg, reg optimisation */ set_cc_op(s, CC_OP_CLR); tcg_gen_movi_tl(s->T0, 0); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; } else { opreg = rm; } - gen_op_mov_v_reg(ot, s->T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ @@ -4700,7 +4701,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { - 
gen_op_mov_v_reg(ot, s->T1, rm); + gen_op_mov_v_reg(s, ot, s->T1, rm); } gen_op(s, op, ot, reg); break; @@ -4786,7 +4787,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, ot, s->T0, s->A0); } } else { - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); } switch(op) { @@ -4809,7 +4810,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } } break; @@ -4847,7 +4848,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } } gen_op_update_neg_cc(s); @@ -4856,26 +4857,26 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 4: /* mul */ switch(ot) { case MO_8: - gen_op_mov_v_reg(MO_8, s->T1, R_EAX); + gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); tcg_gen_ext8u_tl(s->T0, s->T0); tcg_gen_ext8u_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(s->T0, s->T0, s->T1); - gen_op_mov_reg_v(MO_16, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: - gen_op_mov_v_reg(MO_16, s->T1, R_EAX); + gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); tcg_gen_ext16u_tl(s->T0, s->T0); tcg_gen_ext16u_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(s->T0, s->T0, s->T1); - gen_op_mov_reg_v(MO_16, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_shri_tl(s->T0, s->T0, 16); - gen_op_mov_reg_v(MO_16, R_EDX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); tcg_gen_mov_tl(cpu_cc_src, s->T0); set_cc_op(s, CC_OP_MULW); break; @@ -4905,29 +4906,29 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 5: /* imul */ switch(ot) { case MO_8: - gen_op_mov_v_reg(MO_8, s->T1, R_EAX); + gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); tcg_gen_ext8s_tl(s->T0, s->T0); tcg_gen_ext8s_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(s->T0, s->T0, s->T1); - gen_op_mov_reg_v(MO_16, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_ext8s_tl(s->tmp0, s->T0); tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); set_cc_op(s, CC_OP_MULB); break; case MO_16: - gen_op_mov_v_reg(MO_16, s->T1, R_EAX); + gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); tcg_gen_ext16s_tl(s->T0, s->T0); tcg_gen_ext16s_tl(s->T1, s->T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(s->T0, s->T0, s->T1); - gen_op_mov_reg_v(MO_16, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(s->tmp0, s->T0); tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); tcg_gen_shri_tl(s->T0, s->T0, 16); - gen_op_mov_reg_v(MO_16, R_EDX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); set_cc_op(s, CC_OP_MULW); break; default: @@ -5026,7 +5027,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (op >= 2 && op != 3 && op != 5) gen_op_ld_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); } switch(op) { @@ -5115,7 +5116,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_op_mov_v_reg(ot, 
s->T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); break; @@ -5125,7 +5126,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) ot = mo_b_d(b, dflag); val = insn_get(env, s, ot); - gen_op_mov_v_reg(ot, s->T0, OR_EAX); + gen_op_mov_v_reg(s, ot, s->T0, OR_EAX); tcg_gen_movi_tl(s->T1, val); gen_op_testl_T0_T1_cc(s); set_cc_op(s, CC_OP_LOGICB + ot); @@ -5135,20 +5136,20 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch (dflag) { #ifdef TARGET_X86_64 case MO_64: - gen_op_mov_v_reg(MO_32, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); tcg_gen_ext32s_tl(s->T0, s->T0); - gen_op_mov_reg_v(MO_64, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0); break; #endif case MO_32: - gen_op_mov_v_reg(MO_16, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); tcg_gen_ext16s_tl(s->T0, s->T0); - gen_op_mov_reg_v(MO_32, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0); break; case MO_16: - gen_op_mov_v_reg(MO_8, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX); tcg_gen_ext8s_tl(s->T0, s->T0); - gen_op_mov_reg_v(MO_16, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); break; default: tcg_abort(); @@ -5158,22 +5159,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch (dflag) { #ifdef TARGET_X86_64 case MO_64: - gen_op_mov_v_reg(MO_64, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX); tcg_gen_sari_tl(s->T0, s->T0, 63); - gen_op_mov_reg_v(MO_64, R_EDX, s->T0); + gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0); break; #endif case MO_32: - gen_op_mov_v_reg(MO_32, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); tcg_gen_ext32s_tl(s->T0, s->T0); tcg_gen_sari_tl(s->T0, s->T0, 31); - gen_op_mov_reg_v(MO_32, R_EDX, s->T0); + gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0); break; case MO_16: - gen_op_mov_v_reg(MO_16, s->T0, R_EAX); + gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); tcg_gen_ext16s_tl(s->T0, s->T0); tcg_gen_sari_tl(s->T0, s->T0, 15); - gen_op_mov_reg_v(MO_16, R_EDX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); break; default: tcg_abort(); @@ -5197,7 +5198,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(s->T1, val); } else { - gen_op_mov_v_reg(ot, s->T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); } switch (ot) { #ifdef TARGET_X86_64 @@ -5227,7 +5228,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mov_tl(cpu_cc_dst, s->T0); tcg_gen_ext16s_tl(s->tmp0, s->T0); tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; } set_cc_op(s, CC_OP_MULB + ot); @@ -5238,13 +5239,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; - gen_op_mov_v_reg(ot, s->T0, reg); + gen_op_mov_v_reg(s, ot, s->T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(ot, s->T1, rm); + gen_op_mov_v_reg(s, ot, s->T1, rm); tcg_gen_add_tl(s->T0, s->T0, s->T1); - gen_op_mov_reg_v(ot, reg, s->T1); - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T1); + gen_op_mov_reg_v(s, ot, rm, s->T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { @@ -5256,7 +5257,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_add_tl(s->T0, s->T0, s->T1); gen_op_st_v(s, ot, s->T0, s->A0); } - gen_op_mov_reg_v(ot, reg, s->T1); + 
gen_op_mov_reg_v(s, ot, reg, s->T1); } gen_op_update2_cc(s); set_cc_op(s, CC_OP_ADDB + ot); @@ -5273,7 +5274,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) oldv = tcg_temp_new(); newv = tcg_temp_new(); cmpv = tcg_temp_new(); - gen_op_mov_v_reg(ot, newv, reg); + gen_op_mov_v_reg(s, ot, newv, reg); tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); if (s->prefix & PREFIX_LOCK) { @@ -5283,11 +5284,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_lea_modrm(env, s, modrm); tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv, s->mem_index, ot | MO_LE); - gen_op_mov_reg_v(ot, R_EAX, oldv); + gen_op_mov_reg_v(s, ot, R_EAX, oldv); } else { if (mod == 3) { rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(ot, oldv, rm); + gen_op_mov_v_reg(s, ot, oldv, rm); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, s->A0); @@ -5298,15 +5299,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* store value = (old == cmp ? new : old); */ tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); if (mod == 3) { - gen_op_mov_reg_v(ot, R_EAX, oldv); - gen_op_mov_reg_v(ot, rm, newv); + gen_op_mov_reg_v(s, ot, R_EAX, oldv); + gen_op_mov_reg_v(s, ot, rm, newv); } else { /* Perform an unconditional store cycle like physical cpu; must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ gen_op_st_v(s, ot, newv, s->A0); - gen_op_mov_reg_v(ot, R_EAX, oldv); + gen_op_mov_reg_v(s, ot, R_EAX, oldv); } } tcg_gen_mov_tl(cpu_cc_src, oldv); @@ -5351,14 +5352,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ - gen_op_mov_v_reg(MO_32, s->T0, (b & 7) | REX_B(s)); + gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s)); gen_push_v(s, s->T0); break; case 0x58 ... 
0x5f: /* pop */ ot = gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); - gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), s->T0); + gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0); break; case 0x60: /* pusha */ if (CODE64(s)) @@ -5388,7 +5389,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); rm = (modrm & 7) | REX_B(s); - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; @@ -5478,7 +5479,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), s->T0); + gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0); } break; case 0x8a: @@ -5488,7 +5489,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; case 0x8e: /* mov seg, Gv */ modrm = x86_ldub_code(env, s); @@ -5540,10 +5541,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); if (mod == 3) { - if (s_ot == MO_SB && byte_reg_is_xH(rm)) { + if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) { tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8); } else { - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); switch (s_ot) { case MO_UB: tcg_gen_ext8u_tl(s->T0, s->T0); @@ -5560,11 +5561,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) break; } } - gen_op_mov_reg_v(d_ot, reg, s->T0); + gen_op_mov_reg_v(s, d_ot, reg, s->T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, s_ot, s->T0, s->A0); - gen_op_mov_reg_v(d_ot, reg, s->T0); + gen_op_mov_reg_v(s, d_ot, reg, s->T0); } } break; @@ -5579,7 +5580,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(s, a); gen_lea_v_seg(s, s->aflag, ea, -1, -1); - gen_op_mov_reg_v(dflag, reg, s->A0); + gen_op_mov_reg_v(s, dflag, reg, s->A0); } break; @@ -5605,9 +5606,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_v(s, ot, s->T0, s->A0); - gen_op_mov_reg_v(ot, R_EAX, s->T0); + gen_op_mov_reg_v(s, ot, R_EAX, s->T0); } else { - gen_op_mov_v_reg(ot, s->T0, R_EAX); + gen_op_mov_v_reg(s, ot, s->T0, R_EAX); gen_op_st_v(s, ot, s->T0, s->A0); } } @@ -5619,12 +5620,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, s->T0, s->A0); - gen_op_mov_reg_v(MO_8, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(env, s, MO_8); tcg_gen_movi_tl(s->T0, val); - gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), s->T0); + gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0); break; case 0xb8 ... 
0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 @@ -5634,7 +5635,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tmp = x86_ldq_code(env, s); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(s->T0, tmp); - gen_op_mov_reg_v(MO_64, reg, s->T0); + gen_op_mov_reg_v(s, MO_64, reg, s->T0); } else #endif { @@ -5642,7 +5643,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = insn_get(env, s, ot); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(s->T0, val); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); } break; @@ -5661,17 +5662,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: - gen_op_mov_v_reg(ot, s->T0, reg); - gen_op_mov_v_reg(ot, s->T1, rm); - gen_op_mov_reg_v(ot, rm, s->T0); - gen_op_mov_reg_v(ot, reg, s->T1); + gen_op_mov_v_reg(s, ot, s->T0, reg); + gen_op_mov_v_reg(s, ot, s->T1, rm); + gen_op_mov_reg_v(s, ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T1); } else { gen_lea_modrm(env, s, modrm); - gen_op_mov_v_reg(ot, s->T0, reg); + gen_op_mov_v_reg(s, ot, s->T0, reg); /* for xchg, lock is implicit */ tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0, s->mem_index, ot | MO_LE); - gen_op_mov_reg_v(ot, reg, s->T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); } break; case 0xc4: /* les Gv */ @@ -5704,7 +5705,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, MO_16, s->T0, s->A0); gen_movl_seg_T0(s, op); /* then put the data */ - gen_op_mov_reg_v(ot, reg, s->T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); if (s->base.is_jmp) { gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); @@ -5783,7 +5784,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } else { opreg = rm; } - gen_op_mov_v_reg(ot, s->T1, reg); + gen_op_mov_v_reg(s, ot, s->T1, reg); if (shift) { TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); @@ -6244,7 +6245,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0: gen_helper_fnstsw(s->tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); - gen_op_mov_reg_v(MO_16, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); break; default: goto unknown_op; @@ -6397,7 +6398,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } tcg_gen_movi_i32(s->tmp2_i32, val); gen_helper_in_func(ot, s->T1, s->tmp2_i32); - gen_op_mov_reg_v(ot, R_EAX, s->T1); + gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -6411,7 +6412,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_movi_tl(s->T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); - gen_op_mov_v_reg(ot, s->T1, R_EAX); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); @@ -6436,7 +6437,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_in_func(ot, s->T1, s->tmp2_i32); - gen_op_mov_reg_v(ot, R_EAX, s->T1); + gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); @@ -6449,7 +6450,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); - gen_op_mov_v_reg(ot, s->T1, R_EAX); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); @@ -6708,7 +6709,7 @@ static target_ulong 
disas_insn(DisasContext *s, CPUState *cpu) case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; - gen_op_mov_v_reg(MO_8, s->T0, R_AH); + gen_op_mov_v_reg(s, MO_8, s->T0, R_AH); gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C); @@ -6720,7 +6721,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_compute_eflags(s); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02); - gen_op_mov_reg_v(MO_8, R_AH, s->T0); + gen_op_mov_reg_v(s, MO_8, R_AH, s->T0); break; case 0xf5: /* cmc */ gen_compute_eflags(s); @@ -6758,7 +6759,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, ot, s->T0, s->A0); } } else { - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); } /* load shift */ val = x86_ldub_code(env, s); @@ -6784,7 +6785,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(MO_32, s->T1, reg); + gen_op_mov_v_reg(s, MO_32, s->T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ @@ -6797,7 +6798,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_ld_v(s, ot, s->T0, s->A0); } } else { - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); } bt_op: tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1); @@ -6847,7 +6848,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (mod != 3) { gen_op_st_v(s, ot, s->T0, s->A0); } else { - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } } } @@ -6930,7 +6931,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]); } } - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; /************************/ /* bcd */ @@ -7070,7 +7071,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; - gen_op_mov_v_reg(ot, s->T0, reg); + gen_op_mov_v_reg(s, ot, s->T0, reg); gen_lea_modrm(env, s, modrm); tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); if (ot == MO_16) { @@ -7083,16 +7084,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == MO_64) { - gen_op_mov_v_reg(MO_64, s->T0, reg); + gen_op_mov_v_reg(s, MO_64, s->T0, reg); tcg_gen_bswap64_i64(s->T0, s->T0); - gen_op_mov_reg_v(MO_64, reg, s->T0); + gen_op_mov_reg_v(s, MO_64, reg, s->T0); } else #endif { - gen_op_mov_v_reg(MO_32, s->T0, reg); + gen_op_mov_v_reg(s, MO_32, s->T0, reg); tcg_gen_ext32u_tl(s->T0, s->T0); tcg_gen_bswap32_tl(s->T0, s->T0); - gen_op_mov_reg_v(MO_32, reg, s->T0); + gen_op_mov_reg_v(s, MO_32, reg, s->T0); } break; case 0xd6: /* salc */ @@ -7100,7 +7101,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) goto illegal_op; gen_compute_eflags_c(s, s->T0); tcg_gen_neg_tl(s->T0, s->T0); - gen_op_mov_reg_v(MO_8, R_EAX, s->T0); + gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ @@ -7661,16 +7662,16 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rm = (modrm & 7) | REX_B(s); if (mod == 3) { - gen_op_mov_v_reg(MO_32, s->T0, rm); + gen_op_mov_v_reg(s, MO_32, s->T0, rm); /* sign extend */ if (d_ot == MO_64) { 
tcg_gen_ext32s_tl(s->T0, s->T0); } - gen_op_mov_reg_v(d_ot, reg, s->T0); + gen_op_mov_reg_v(s, d_ot, reg, s->T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0); - gen_op_mov_reg_v(d_ot, reg, s->T0); + gen_op_mov_reg_v(s, d_ot, reg, s->T0); } } else #endif @@ -7694,10 +7695,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) a0 = tcg_temp_local_new(); tcg_gen_mov_tl(a0, s->A0); } else { - gen_op_mov_v_reg(ot, t0, rm); + gen_op_mov_v_reg(s, ot, t0, rm); a0 = NULL; } - gen_op_mov_v_reg(ot, t1, reg); + gen_op_mov_v_reg(s, ot, t1, reg); tcg_gen_andi_tl(s->tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); @@ -7711,7 +7712,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_st_v(s, ot, t0, a0); tcg_temp_free(a0); } else { - gen_op_mov_reg_v(ot, rm, t0); + gen_op_mov_reg_v(s, ot, rm, t0); } gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); @@ -7742,7 +7743,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); - gen_op_mov_reg_v(ot, reg, t0); + gen_op_mov_reg_v(s, ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); tcg_temp_free(t0); @@ -7996,7 +7997,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); gen_helper_write_crN(cpu_env, tcg_const_i32(reg), s->T0); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { @@ -8009,7 +8010,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_io_start(); } gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg)); - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); } @@ -8042,7 +8043,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); - gen_op_mov_v_reg(ot, s->T0, rm); + gen_op_mov_v_reg(s, ot, s->T0, rm); tcg_gen_movi_i32(s->tmp2_i32, reg); gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0); gen_jmp_im(s, s->pc - s->cs_base); @@ -8051,7 +8052,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(s->tmp2_i32, reg); gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32); - gen_op_mov_reg_v(ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, rm, s->T0); } } break; @@ -8313,7 +8314,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_extu(ot, s->T0); tcg_gen_mov_tl(cpu_cc_src, s->T0); tcg_gen_ctpop_tl(s->T0, s->T0); - gen_op_mov_reg_v(ot, reg, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); set_cc_op(s, CC_OP_POPCNT); break; From 0a7fa00a13f0852ec6fa83ab987a5ee7978d9867 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Mon, 13 Aug 2018 20:52:26 -0400 Subject: [PATCH 37/80] configure: enable mttcg for i386 and x86_64 Reviewed-by: Richard Henderson Signed-off-by: Emilio G. 
Cota Signed-off-by: Paolo Bonzini --- configure | 2 ++ 1 file changed, 2 insertions(+) diff --git a/configure b/configure index 59d1ade2d3..4fc1feaa6f 100755 --- a/configure +++ b/configure @@ -7024,12 +7024,14 @@ TARGET_ABI_DIR="" case "$target_name" in i386) + mttcg="yes" gdb_xml_files="i386-32bit.xml i386-32bit-core.xml i386-32bit-sse.xml" target_compiler=$cross_cc_i386 target_compiler_cflags=$cross_cc_ccflags_i386 ;; x86_64) TARGET_BASE_ARCH=i386 + mttcg="yes" gdb_xml_files="i386-64bit.xml i386-64bit-core.xml i386-64bit-sse.xml" target_compiler=$cross_cc_x86_64 ;; From 0c08185f8fe1eb20edec1a2bf32b4d219cc023f0 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:19:45 +0300 Subject: [PATCH 38/80] replay: wake up vCPU when replaying In record/replay icount mode the vCPU thread and the iothread synchronize their execution using checkpoints. The vCPU thread processes the virtual timers and the iothread processes all others. When the iothread wants to wake up a sleeping vCPU thread, it sends dummy queued work. Therefore the following sequence of events may occur in record mode: - IO: sending dummy work - IO: processing timers - CPU: wakeup - CPU: clearing dummy work - CPU: processing virtual timers But due to races in replay mode the sequence may change: - IO: sending dummy work - CPU: wakeup - CPU: clearing dummy work - CPU: sleeping again because nothing to do - IO: processing timers - CPU: zzzz In this case the vCPU will not wake up, because the dummy work is not set up again. This patch wakes up the vCPU when it is sleeping while the icount warp checkpoint isn't met. In that case the vCPU must have something to do, because there is no other reason for a non-matching warp checkpoint. Signed-off-by: Pavel Dovgalyuk -- v5: improve checking that vCPU is still sleeping Message-Id: <20180912081945.3228.19776.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- cpus.c | 31 +++++++++++++++++++++---------- include/sysemu/replay.h | 3 +++ replay/replay.c | 12 ++++++++++++ 3 files changed, 36 insertions(+), 10 deletions(-) diff --git a/cpus.c b/cpus.c index d8b3b46cc8..68f08f5b2d 100644 --- a/cpus.c +++ b/cpus.c @@ -583,18 +583,29 @@ void qemu_start_warp_timer(void) return; } - /* warp clock deterministically in record/replay mode */ - if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) { - return; - } + if (replay_mode != REPLAY_MODE_PLAY) { + if (!all_cpu_threads_idle()) { + return; + } - if (!all_cpu_threads_idle()) { - return; - } + if (qtest_enabled()) { + /* When testing, qtest commands advance icount. */ + return; + } - if (qtest_enabled()) { - /* When testing, qtest commands advance icount. */ - return; + replay_checkpoint(CHECKPOINT_CLOCK_WARP_START); + } else { + /* warp clock deterministically in record/replay mode */ + if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) { + /* vCPU is sleeping and warp can't be started. + It is probably a race condition: the notification sent + to the vCPU was processed in advance and the vCPU went to sleep. + Therefore we have to wake it up to do something. */ + if (replay_has_checkpoint()) { + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } + return; + } } /* We want to use the earliest deadline from ALL vm_clocks */ diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 3ced6bc231..7f7a594eca 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -120,6 +120,9 @@ void replay_shutdown_request(ShutdownCause cause); Returns 0 in PLAY mode if checkpoint was not found. Returns 1 in all other cases.
*/ bool replay_checkpoint(ReplayCheckpoint checkpoint); +/*! Used to determine whether a checkpoint is pending. + Does not proceed to the next event in the log. */ +bool replay_has_checkpoint(void); /* Asynchronous events queue */ diff --git a/replay/replay.c b/replay/replay.c index 8228261401..379b51ab46 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -224,6 +224,18 @@ out: return res; } +bool replay_has_checkpoint(void) +{ + bool res = false; + if (replay_mode == REPLAY_MODE_PLAY) { + g_assert(replay_mutex_locked()); + replay_account_executed_instructions(); + res = EVENT_CHECKPOINT <= replay_state.data_kind + && replay_state.data_kind <= EVENT_CHECKPOINT_LAST; + } + return res; +} + static void replay_enable(const char *fname, int mode) { const char *fmode = NULL; From d873fe03766481b72549ff15ee647c086a98c12f Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:18:59 +0300 Subject: [PATCH 39/80] replay: flush events when exiting This patch adds event processing when emulation finishes instead of just clearing the queue. Now the bdrv coroutines will be in a consistent state when the emulator exits, which allows correct polling of the block layer at exit. Signed-off-by: Pavel Dovgalyuk Message-Id: <20180912081859.3228.79735.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- replay/replay-events.c | 14 +------------- replay/replay-internal.h | 2 -- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/replay/replay-events.c b/replay/replay-events.c index 707de3867b..0964a82838 100644 --- a/replay/replay-events.c +++ b/replay/replay-events.c @@ -94,18 +94,6 @@ void replay_disable_events(void) } } -void replay_clear_events(void) -{ - g_assert(replay_mutex_locked()); - - while (!QTAILQ_EMPTY(&events_list)) { - Event *event = QTAILQ_FIRST(&events_list); - QTAILQ_REMOVE(&events_list, event, events); - - g_free(event); - } -} - /*! Adds specified async event to the queue */ void replay_add_event(ReplayAsyncEventKind event_kind, void *opaque, @@ -308,7 +296,7 @@ void replay_init_events(void) void replay_finish_events(void) { events_enabled = false; - replay_clear_events(); + replay_flush_events(); } bool replay_events_enabled(void) diff --git a/replay/replay-internal.h b/replay/replay-internal.h index ac4b27b674..9b0fd916a3 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -142,8 +142,6 @@ void replay_init_events(void); void replay_finish_events(void); /*! Flushes events queue */ void replay_flush_events(void); -/*! Clears events list before loading new VM state */ -void replay_clear_events(void); /*! Returns true if there are any unsaved events in the queue */ bool replay_has_events(void); /*! Saves events from queue into the file */ From f9f1f56e4da088b993ce28775c271d5bcdcf49ae Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:19:10 +0300 Subject: [PATCH 40/80] translator: fix breakpoint processing QEMU cannot step past breakpoints when the 'si' command is used in remote gdb. This patch disables inserting the breakpoints when we are already single-stepping through the gdb remote protocol. This patch also fixes the icount calculation for blocks that include breakpoints: the instruction that hits the breakpoint is not executed, so it must not be counted in the block's icount.
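For context, a minimal sketch of where the tested flag comes from (simplified; not part of this patch, and the exact call sites are elided):

    /* gdbstub.c: a gdb 's' (step) packet single-steps the vCPU */
    cpu_single_step(cpu, sstep_flags);   /* sets cpu->singlestep_enabled */

    /* accel/tcg/translator.c: translator_loop() copies the flag */
    db->singlestep_enabled = cpu->singlestep_enabled;

With the flag set, every translation block already stops after a single instruction, so scanning the breakpoint list is redundant and would only make the translator stop on the breakpoint again instead of stepping past it.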
Signed-off-by: Pavel Dovgalyuk Message-Id: <20180912081910.3228.8523.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- accel/tcg/translator.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 0f9dca9113..afd0a49ea6 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -34,6 +34,8 @@ void translator_loop_temp_check(DisasContextBase *db) void translator_loop(const TranslatorOps *ops, DisasContextBase *db, CPUState *cpu, TranslationBlock *tb) { + int bp_insn = 0; + /* Initialize DisasContext */ db->tb = tb; db->pc_first = tb->pc; @@ -71,11 +73,13 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db, tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ /* Pass breakpoint hits to target for further processing */ - if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) { + if (!db->singlestep_enabled + && unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) { CPUBreakpoint *bp; QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { if (bp->pc == db->pc_next) { if (ops->breakpoint_check(db, cpu, bp)) { + bp_insn = 1; break; } } @@ -118,7 +122,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db, /* Emit code to exit the TB, as indicated by db->is_jmp. */ ops->tb_stop(db, cpu); - gen_tb_end(db->tb, db->num_insns); + gen_tb_end(db->tb, db->num_insns - bp_insn); /* The disas_log hook may use these values rather than recompute. */ db->tb->size = db->pc_next - db->pc_first; From bb3d7702e8dd0fd84c9496e226b46ce964b76e13 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:19:39 +0300 Subject: [PATCH 41/80] replay: allow loading any snapshots before recording This patch enables using -loadvm in recording mode to allow starting the execution recording from any of the available snapshots. It also fixes loading of the record/replay state, therefore snapshots created in replay mode may also be used for starting the new recording. Signed-off-by: Pavel Dovgalyuk Message-Id: <20180912081939.3228.56131.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- replay/replay-snapshot.c | 17 ++++++++++++----- vl.c | 7 ++++--- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c index 2ab85cfc60..16bacc98bc 100644 --- a/replay/replay-snapshot.c +++ b/replay/replay-snapshot.c @@ -33,11 +33,18 @@ static int replay_pre_save(void *opaque) static int replay_post_load(void *opaque, int version_id) { ReplayState *state = opaque; - fseek(replay_file, state->file_offset, SEEK_SET); - qemu_clock_set_last(QEMU_CLOCK_HOST, state->host_clock_last); - /* If this was a vmstate, saved in recording mode, - we need to initialize replay data fields. */ - replay_fetch_data_kind(); + if (replay_mode == REPLAY_MODE_PLAY) { + fseek(replay_file, state->file_offset, SEEK_SET); + qemu_clock_set_last(QEMU_CLOCK_HOST, state->host_clock_last); + /* If this was a vmstate, saved in recording mode, + we need to initialize replay data fields. */ + replay_fetch_data_kind(); + } else if (replay_mode == REPLAY_MODE_RECORD) { + /* This is only useful for loading the initial state. + Therefore reset all the counters. 
*/ + state->instructions_count = 0; + state->block_request_id = 0; + } return 0; } diff --git a/vl.c b/vl.c index f2964d9e43..0388852deb 100644 --- a/vl.c +++ b/vl.c @@ -4535,9 +4535,7 @@ int main(int argc, char **argv, char **envp) replay_checkpoint(CHECKPOINT_RESET); qemu_system_reset(SHUTDOWN_CAUSE_NONE); register_global_state(); - if (replay_mode != REPLAY_MODE_NONE) { - replay_vmstate_init(); - } else if (loadvm) { + if (loadvm) { Error *local_err = NULL; if (load_snapshot(loadvm, &local_err) < 0) { error_report_err(local_err); @@ -4545,6 +4543,9 @@ int main(int argc, char **argv, char **envp) exit(1); } } + if (replay_mode != REPLAY_MODE_NONE) { + replay_vmstate_init(); + } qdev_prop_check_globals(); if (vmstate_dump_file) { From 87f4fe7653baf55b5c2f2753fe6003f473c07342 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:20:02 +0300 Subject: [PATCH 42/80] timer: introduce new virtual clock The slirp and VNC modules use the virtual clock for processing some events that are related to the guest execution speed. But virtual clock-related events are considered to be deterministic and are recorded/replayed by the icount mechanism. However, slirp and VNC lie outside the recorded guest core (which includes the CPU and peripherals). Therefore slirp and VNC are external to the guest, but should run at guest speed. This patch introduces a new virtual clock which external subsystems can use for running timers that are synchronized with the guest. Signed-off-by: Pavel Dovgalyuk Message-Id: <20180912082002.3228.82417.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- include/qemu/timer.h | 9 +++++++++ util/qemu-timer.c | 2 ++ 2 files changed, 11 insertions(+) diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 39ea907e65..a005ed2692 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -42,6 +42,14 @@ * In icount mode, this clock counts nanoseconds while the virtual * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL * while the CPUs are sleeping and thus not executing instructions. + * + * @QEMU_CLOCK_VIRTUAL_EXT: virtual clock for external subsystems + * + * The virtual clock only runs during the emulation. It stops + * when the virtual machine is stopped. The timers for this clock + * are not recorded in rr mode, therefore this clock can be used + * for the subsystems that operate outside the guest core. + * */ typedef enum { @@ -49,6 +57,7 @@ typedef enum { QEMU_CLOCK_VIRTUAL = 1, QEMU_CLOCK_HOST = 2, QEMU_CLOCK_VIRTUAL_RT = 3, + QEMU_CLOCK_VIRTUAL_EXT = 4, QEMU_CLOCK_MAX } QEMUClockType; diff --git a/util/qemu-timer.c b/util/qemu-timer.c index 86bfe84037..eb60d8f73a 100644 --- a/util/qemu-timer.c +++ b/util/qemu-timer.c @@ -496,6 +496,7 @@ bool timerlist_run_timers(QEMUTimerList *timer_list) switch (timer_list->clock->type) { case QEMU_CLOCK_REALTIME: + case QEMU_CLOCK_VIRTUAL_EXT: break; default: case QEMU_CLOCK_VIRTUAL: @@ -597,6 +598,7 @@ int64_t qemu_clock_get_ns(QEMUClockType type) return get_clock(); default: case QEMU_CLOCK_VIRTUAL: + case QEMU_CLOCK_VIRTUAL_EXT: if (use_icount) { return cpu_get_icount(); } else { From 775a412bf83f6bc0c5c02091ee06cf649b34c593 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:20:07 +0300 Subject: [PATCH 43/80] slirp: fix ipv6 timers The ICMP implementation for IPv6 uses timers based on the virtual clock. This is incorrect because this service is not related to the guest state, and its events should not be recorded and replayed.
This patch switches these timers from the virtual clock to the new virtual_ext clock. Signed-off-by: Pavel Dovgalyuk Message-Id: <20180912082007.3228.91491.stgit@pasha-VirtualBox> Reviewed-by: Samuel Thibault Signed-off-by: Paolo Bonzini --- slirp/ip6_icmp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/slirp/ip6_icmp.c b/slirp/ip6_icmp.c index ee333d05a2..3f41187cfe 100644 --- a/slirp/ip6_icmp.c +++ b/slirp/ip6_icmp.c @@ -17,7 +17,7 @@ static void ra_timer_handler(void *opaque) { Slirp *slirp = opaque; timer_mod(slirp->ra_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + NDP_Interval); + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + NDP_Interval); ndp_send_ra(slirp); } @@ -27,9 +27,10 @@ void icmp6_init(Slirp *slirp) return; } - slirp->ra_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, ra_timer_handler, slirp); + slirp->ra_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_EXT, + ra_timer_handler, slirp); timer_mod(slirp->ra_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + NDP_Interval); + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + NDP_Interval); } void icmp6_cleanup(Slirp *slirp) From 9888091404a702d7ec79d51b088d994b9fc121bd Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Wed, 12 Sep 2018 11:20:13 +0300 Subject: [PATCH 44/80] ui: fix virtual timers The UI uses timers based on the virtual clock for managing the key queue. This is incorrect because this service is not related to the guest state, and its events should not be recorded and replayed. But these timers should stop when the guest is not executing. This patch switches these timers to the new virtual_ext clock, which runs like the virtual clock, but whose timers are not saved to the log. Signed-off-by: Pavel Dovgalyuk Message-Id: <20180912082013.3228.33664.stgit@pasha-VirtualBox> Signed-off-by: Paolo Bonzini --- ui/input.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ui/input.c b/ui/input.c index 51b1019252..dd7f6d7f21 100644 --- a/ui/input.c +++ b/ui/input.c @@ -271,7 +271,7 @@ static void qemu_input_queue_process(void *opaque) item = QTAILQ_FIRST(queue); switch (item->type) { case QEMU_INPUT_QUEUE_DELAY: - timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + item->delay_ms); return; case QEMU_INPUT_QUEUE_EVENT: @@ -301,7 +301,7 @@ static void qemu_input_queue_delay(struct QemuInputEventQueueHead *queue, queue_count++; if (start_timer) { - timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + item->delay_ms); } } @@ -448,8 +448,8 @@ void qemu_input_event_send_key_delay(uint32_t delay_ms) } if (!kbd_timer) { - kbd_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, qemu_input_queue_process, - &kbd_queue); + kbd_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_EXT, + qemu_input_queue_process, &kbd_queue); } if (queue_count < queue_limit) { qemu_input_queue_delay(&kbd_queue, kbd_timer, From 2247936a043a609a156cf90c9f346254c48169e4 Mon Sep 17 00:00:00 2001 From: Li Qiang Date: Wed, 12 Sep 2018 09:01:11 -0700 Subject: [PATCH 45/80] fw_cfg_mem: add read memory region callback Signed-off-by: Li Qiang Message-Id: <20180912160118.21158-2-liq3ea@163.com> Signed-off-by: Paolo Bonzini --- hw/nvram/fw_cfg.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index d79a568f54..6de7809f1a 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -434,6 +434,11 @@ static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr, return addr == 0; } +static uint64_t
fw_cfg_ctl_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { @@ -468,6 +473,7 @@ static bool fw_cfg_comb_valid(void *opaque, hwaddr addr, } static const MemoryRegionOps fw_cfg_ctl_mem_ops = { + .read = fw_cfg_ctl_mem_read, .write = fw_cfg_ctl_mem_write, .endianness = DEVICE_BIG_ENDIAN, .valid.accepts = fw_cfg_ctl_mem_valid, From af71743ad6c475b682c21767202e6b005894c795 Mon Sep 17 00:00:00 2001 From: Li Qiang Date: Wed, 12 Sep 2018 09:01:12 -0700 Subject: [PATCH 46/80] hw: debugexit: add read callback Signed-off-by: Li Qiang Message-Id: <20180912160118.21158-3-liq3ea@163.com> Signed-off-by: Paolo Bonzini --- hw/misc/debugexit.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hw/misc/debugexit.c b/hw/misc/debugexit.c index 84fa1a5b9d..bed293247e 100644 --- a/hw/misc/debugexit.c +++ b/hw/misc/debugexit.c @@ -23,6 +23,11 @@ typedef struct ISADebugExitState { MemoryRegion io; } ISADebugExitState; +static uint64_t debug_exit_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + static void debug_exit_write(void *opaque, hwaddr addr, uint64_t val, unsigned width) { @@ -30,6 +35,7 @@ static void debug_exit_write(void *opaque, hwaddr addr, uint64_t val, } static const MemoryRegionOps debug_exit_ops = { + .read = debug_exit_read, .write = debug_exit_write, .valid.min_access_size = 1, .valid.max_access_size = 4, From 57cdec5e5f5f0838bf1a8c9feb4e304144a019f3 Mon Sep 17 00:00:00 2001 From: Li Qiang Date: Wed, 12 Sep 2018 09:01:14 -0700 Subject: [PATCH 47/80] hw: pc-testdev: add read memory region callback Also change the write callback name. Signed-off-by: Li Qiang Message-Id: <20180912160118.21158-5-liq3ea@163.com> Signed-off-by: Paolo Bonzini --- hw/misc/pc-testdev.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/hw/misc/pc-testdev.c b/hw/misc/pc-testdev.c index b81d820084..697eb88c97 100644 --- a/hw/misc/pc-testdev.c +++ b/hw/misc/pc-testdev.c @@ -58,7 +58,12 @@ typedef struct PCTestdev { #define TESTDEV(obj) \ OBJECT_CHECK(PCTestdev, (obj), TYPE_TESTDEV) -static void test_irq_line(void *opaque, hwaddr addr, uint64_t data, +static uint64_t test_irq_line_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void test_irq_line_write(void *opaque, hwaddr addr, uint64_t data, unsigned len) { PCTestdev *dev = opaque; @@ -68,7 +73,8 @@ static void test_irq_line(void *opaque, hwaddr addr, uint64_t data, } static const MemoryRegionOps test_irq_ops = { - .write = test_irq_line, + .read = test_irq_line_read, + .write = test_irq_line_write, .valid.min_access_size = 1, .valid.max_access_size = 1, .endianness = DEVICE_LITTLE_ENDIAN, @@ -110,7 +116,12 @@ static const MemoryRegionOps test_ioport_byte_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void test_flush_page(void *opaque, hwaddr addr, uint64_t data, +static uint64_t test_flush_page_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void test_flush_page_write(void *opaque, hwaddr addr, uint64_t data, unsigned len) { hwaddr page = 4096; @@ -126,7 +137,8 @@ static void test_flush_page(void *opaque, hwaddr addr, uint64_t data, } static const MemoryRegionOps test_flush_ops = { - .write = test_flush_page, + .read = test_flush_page_read, + .write = test_flush_page_write, .valid.min_access_size = 4, .valid.max_access_size = 4, .endianness = DEVICE_LITTLE_ENDIAN, From e9b6be9260a0f43d7b9b709674f3fddd7432057b Mon Sep 17 00:00:00 2001 From: 
Li Qiang Date: Wed, 12 Sep 2018 09:01:13 -0700 Subject: [PATCH 48/80] hw: hyperv_testdev: add read callback Signed-off-by: Li Qiang Message-Id: <20180912160118.21158-4-liq3ea@163.com> Signed-off-by: Paolo Bonzini --- hw/misc/hyperv_testdev.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hw/misc/hyperv_testdev.c b/hw/misc/hyperv_testdev.c index bf6bbfa8cf..7549f470b1 100644 --- a/hw/misc/hyperv_testdev.c +++ b/hw/misc/hyperv_testdev.c @@ -105,7 +105,12 @@ static void hv_synic_test_dev_control(HypervTestDev *dev, uint32_t ctl, } } -static void hv_test_dev_control(void *opaque, hwaddr addr, uint64_t data, +static uint64_t hv_test_dev_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void hv_test_dev_write(void *opaque, hwaddr addr, uint64_t data, uint32_t len) { HypervTestDev *dev = HYPERV_TEST_DEV(opaque); @@ -127,7 +132,8 @@ static void hv_test_dev_control(void *opaque, hwaddr addr, uint64_t data, } static const MemoryRegionOps synic_test_sint_ops = { - .write = hv_test_dev_control, + .read = hv_test_dev_read, + .write = hv_test_dev_write, .valid.min_access_size = 4, .valid.max_access_size = 4, .endianness = DEVICE_LITTLE_ENDIAN, From 1cd3d492624da399d66c4c3e6a5eabb8f96bb0a2 Mon Sep 17 00:00:00 2001 From: Igor Mammedov Date: Tue, 4 Sep 2018 14:39:37 +0200 Subject: [PATCH 49/80] memory: cleanup side effects of memory_region_init_foo() on failure If MemoryRegion initialization fails, it is left in a semi-initialized state, where its size is not 0 and it is attached as a child to the owner object. This leads to a crash in the following use-case: (monitor) object_add memory-backend-file,id=mem1,size=99999G,mem-path=/tmp/foo,discard-data=yes memory.c:2083: memory_region_get_ram_ptr: Assertion `mr->ram_block' failed Aborted (core dumped) It happens due to the assumption that a memory region is initialized when memory_region_size() != 0, and that it is therefore OK to access it in file_backend_unparent(): if (memory_region_size() != 0) memory_region_get_ram_ptr() which happens when object_add fails and unparents the failed backend, making file_backend_unparent() access an invalid memory region. Fix it by making sure that the memory_region_init_foo() APIs clean up externally visible side effects on failure (i.e. set the size to 0 and unparent the object). Signed-off-by: Igor Mammedov Message-Id: <1536064777-42312-1-git-send-email-imammedo@redhat.com> Signed-off-by: Paolo Bonzini --- memory.c | 48 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/memory.c b/memory.c index 9b73892768..aceadb2bf8 100644 --- a/memory.c +++ b/memory.c @@ -1518,12 +1518,18 @@ void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, bool share, Error **errp) { + Error *err = NULL; memory_region_init(mr, owner, name, size); mr->ram = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc(size, share, mr, errp); + mr->ram_block = qemu_ram_alloc(size, share, mr, &err); mr->dirty_log_mask = tcg_enabled() ?
(1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } } void memory_region_init_resizeable_ram(MemoryRegion *mr, @@ -1536,13 +1542,19 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, void *host), Error **errp) { + Error *err = NULL; memory_region_init(mr, owner, name, size); mr->ram = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized, - mr, errp); + mr, &err); mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } } #ifdef __linux__ @@ -1555,13 +1567,19 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, const char *path, Error **errp) { + Error *err = NULL; memory_region_init(mr, owner, name, size); mr->ram = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->align = align; - mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, errp); + mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err); mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } } void memory_region_init_ram_from_fd(MemoryRegion *mr, @@ -1572,14 +1590,20 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr, int fd, Error **errp) { + Error *err = NULL; memory_region_init(mr, owner, name, size); mr->ram = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share ? RAM_SHARED : 0, - fd, errp); + fd, &err); mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } } #endif @@ -1630,13 +1654,19 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr, uint64_t size, Error **errp) { + Error *err = NULL; memory_region_init(mr, owner, name, size); mr->ram = true; mr->readonly = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc(size, false, mr, errp); + mr->ram_block = qemu_ram_alloc(size, false, mr, &err); mr->dirty_log_mask = tcg_enabled() ? 
(1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } } void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, @@ -1647,6 +1677,7 @@ void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, uint64_t size, Error **errp) { + Error *err = NULL; assert(ops); memory_region_init(mr, owner, name, size); mr->ops = ops; @@ -1654,7 +1685,12 @@ void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, mr->terminates = true; mr->rom_device = true; mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc(size, false, mr, errp); + mr->ram_block = qemu_ram_alloc(size, false, mr, &err); + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } } void memory_region_init_iommu(void *_iommu_mr, From ded2bcdf3724b80da86b984ae31abf1ebb6369af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 17 Aug 2018 15:52:21 +0200 Subject: [PATCH 50/80] Revert "chardev: tcp: postpone TLS work until machine done" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 99f2f54174a595e3ada6e4332fcd2b37ebb0d55d. See the next commit, which reverts 25679e5d58e258e9950685ffbd0cae4cd40d9cc2 as well, for the rationale. Signed-off-by: Marc-André Lureau Message-Id: <20180817135224.22971-2-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- chardev/char-socket.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/chardev/char-socket.c b/chardev/char-socket.c index efbad6ee7c..e28d2cca4c 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -32,7 +32,6 @@ #include "qapi/error.h" #include "qapi/clone-visitor.h" #include "qapi/qapi-visit-sockets.h" -#include "sysemu/sysemu.h" #include "chardev/char-io.h" @@ -724,11 +723,6 @@ static void tcp_chr_tls_init(Chardev *chr) Error *err = NULL; gchar *name; - if (!machine_init_done) { - /* This will be postponed to machine_done notifier */ - return; - } - if (s->is_listen) { tioc = qio_channel_tls_new_server( s->ioc, s->tls_creds, @@ -1169,10 +1163,6 @@ static int tcp_chr_machine_done_hook(Chardev *chr) tcp_chr_connect_async(chr); } - if (s->ioc && s->tls_creds) { - tcp_chr_tls_init(chr); - } - return 0; } From 5573f98fa66df133154529beaabf9d6a331d94bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 17 Aug 2018 15:52:22 +0200 Subject: [PATCH 51/80] Revert "chardev: tcp: postpone async connection setup" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 25679e5d58e258e9950685ffbd0cae4cd40d9cc2. The reverted commit broke "reconnect socket" chardevs that are created after "machine_done": they no longer try to connect. It also broke vhost-user-test, which uses a chardev while there is no "machine_done" event. The goal of that commit was to move the "connect" source to the frontend context. chr->gcontext is set with qemu_chr_fe_set_handlers(). But there is no guarantee that it will be called, so we can't delay the connection until then: the chardev should still attempt to connect during open(). qemu_chr_fe_set_handlers() is eventually called later and will update the context. Unless there is a good reason not to use the default context initially, I think we should revert to the previous state to fix the regressions.
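The scheme this revert restores can be distilled into a small self-contained glib program (a sketch only: the chardev machinery is stubbed out, and just the GSource handling mirrors what open() and a later qemu_chr_fe_set_handlers() do): create the watch in the default GMainContext right away, then replace it with a watch on the frontend's context once that context is known.

/* Sketch: bind a source to the default context first, rebind later.
 * Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>

static gboolean tick(gpointer data)
{
    g_main_loop_quit(data);
    return G_SOURCE_REMOVE;
}

int main(void)
{
    GMainContext *ctx = g_main_context_new();   /* frontend context */
    GMainLoop *loop = g_main_loop_new(ctx, FALSE);
    GSource *src = g_timeout_source_new(10);

    g_source_set_callback(src, tick, loop, NULL);
    g_source_attach(src, NULL);        /* open(): default context */

    g_source_destroy(src);             /* set_handlers(): rebind */
    g_source_unref(src);
    src = g_timeout_source_new(10);
    g_source_set_callback(src, tick, loop, NULL);
    g_source_attach(src, ctx);

    g_main_loop_run(loop);             /* fires from ctx, then quits */
    g_source_unref(src);
    g_main_loop_unref(loop);
    g_main_context_unref(ctx);
    return 0;
}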
Signed-off-by: Marc-André Lureau Message-Id: <20180817135224.22971-3-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- chardev/char-socket.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/chardev/char-socket.c b/chardev/char-socket.c index e28d2cca4c..14f6567f6a 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1005,8 +1005,9 @@ static void qmp_chardev_open_socket(Chardev *chr, s->reconnect_time = reconnect; } - /* If reconnect_time is set, will do that in chr_machine_done. */ - if (!s->reconnect_time) { + if (s->reconnect_time) { + tcp_chr_connect_async(chr); + } else { if (s->is_listen) { char *name; s->listener = qio_net_listener_new(); @@ -1155,17 +1156,6 @@ char_socket_get_connected(Object *obj, Error **errp) return s->connected; } -static int tcp_chr_machine_done_hook(Chardev *chr) -{ - SocketChardev *s = SOCKET_CHARDEV(chr); - - if (s->reconnect_time) { - tcp_chr_connect_async(chr); - } - - return 0; -} - static void char_socket_class_init(ObjectClass *oc, void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); @@ -1181,7 +1171,6 @@ static void char_socket_class_init(ObjectClass *oc, void *data) cc->chr_add_client = tcp_chr_add_client; cc->chr_add_watch = tcp_chr_add_watch; cc->chr_update_read_handler = tcp_chr_update_read_handler; - cc->chr_machine_done = tcp_chr_machine_done_hook; object_class_property_add(oc, "addr", "SocketAddress", char_socket_get_addr, NULL, From dfe9ea200ab1ba941149e90a6b3d220afc2dcc4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 17 Aug 2018 15:52:23 +0200 Subject: [PATCH 52/80] char-socket: update all ioc handlers when changing context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So far, tcp_chr_update_read_handler() only updated the read handler. Let's also update the hup handler. Factorize the code while at it. 
(note that s->ioc != NULL when s->connected) Signed-off-by: Marc-André Lureau Message-Id: <20180817135224.22971-4-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- chardev/char-socket.c | 59 ++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 14f6567f6a..7cd0ae2824 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -353,6 +353,15 @@ static GSource *tcp_chr_add_watch(Chardev *chr, GIOCondition cond) return qio_channel_create_watch(s->ioc, cond); } +static void remove_hup_source(SocketChardev *s) +{ + if (s->hup_source != NULL) { + g_source_destroy(s->hup_source); + g_source_unref(s->hup_source); + s->hup_source = NULL; + } +} + static void tcp_chr_free_connection(Chardev *chr) { SocketChardev *s = SOCKET_CHARDEV(chr); @@ -367,11 +376,7 @@ static void tcp_chr_free_connection(Chardev *chr) s->read_msgfds_num = 0; } - if (s->hup_source != NULL) { - g_source_destroy(s->hup_source); - g_source_unref(s->hup_source); - s->hup_source = NULL; - } + remove_hup_source(s); tcp_set_msgfds(chr, NULL, 0); remove_fd_in_watch(chr); @@ -540,6 +545,27 @@ static char *sockaddr_to_str(struct sockaddr_storage *ss, socklen_t ss_len, } } +static void update_ioc_handlers(SocketChardev *s) +{ + Chardev *chr = CHARDEV(s); + + if (!s->connected) { + return; + } + + remove_fd_in_watch(chr); + chr->gsource = io_add_watch_poll(chr, s->ioc, + tcp_chr_read_poll, + tcp_chr_read, chr, + chr->gcontext); + + remove_hup_source(s); + s->hup_source = qio_channel_create_watch(s->ioc, G_IO_HUP); + g_source_set_callback(s->hup_source, (GSourceFunc)tcp_chr_hup, + chr, NULL); + g_source_attach(s->hup_source, chr->gcontext); +} + static void tcp_chr_connect(void *opaque) { Chardev *chr = CHARDEV(opaque); @@ -552,16 +578,7 @@ static void tcp_chr_connect(void *opaque) s->is_listen, s->is_telnet); s->connected = 1; - chr->gsource = io_add_watch_poll(chr, s->ioc, - tcp_chr_read_poll, - tcp_chr_read, - chr, chr->gcontext); - - s->hup_source = qio_channel_create_watch(s->ioc, G_IO_HUP); - g_source_set_callback(s->hup_source, (GSourceFunc)tcp_chr_hup, - chr, NULL); - g_source_attach(s->hup_source, chr->gcontext); - + update_ioc_handlers(s); qemu_chr_be_event(chr, CHR_EVENT_OPENED); } @@ -592,17 +609,7 @@ static void tcp_chr_update_read_handler(Chardev *chr) tcp_chr_telnet_init(CHARDEV(s)); } - if (!s->connected) { - return; - } - - remove_fd_in_watch(chr); - if (s->ioc) { - chr->gsource = io_add_watch_poll(chr, s->ioc, - tcp_chr_read_poll, - tcp_chr_read, chr, - chr->gcontext); - } + update_ioc_handlers(s); } static gboolean tcp_chr_telnet_init_io(QIOChannel *ioc, From 1ef64f1482c1f0df142eb7e4ab41bfe90ad1e65b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Thu, 23 Aug 2018 16:31:24 +0200 Subject: [PATCH 53/80] test-char: fix random socket test failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Peter reported a test failure on FreeBSD with the new reconnect test: MALLOC_PERTURB_=${MALLOC_PERTURB_:-$(( ${RANDOM:-0} % 255 + 1))} gtester -k --verbose -m=quick tests/test-char TEST: tests/test-char... 
(pid=16190) /char/null: OK /char/invalid: OK /char/ringbuf: OK /char/mux: OK /char/stdio: OK /char/pipe: OK /char/file: OK /char/file-fifo: OK /char/udp: OK /char/serial: OK /char/hotswap: OK /char/socket/basic: OK /char/socket/reconnect: FAIL GTester: last random seed: R02S521380d9c12f1dac3ad1763bf5665c27 (pid=16367) /char/socket/fdpass: OK FAIL: tests/test-char ** ERROR:tests/test-char.c:353:char_socket_test_common: assertion failed: (object_property_get_bool(OBJECT(chr_client), "connected", &error_abort)) It turns out that the socket test code checks both server and client connection states, but doesn't wait for both. Wait for the client side as well. Signed-off-by: Marc-André Lureau Reviewed-by: Paolo Bonzini Message-Id: <20180823143125.16767-5-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- tests/test-char.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test-char.c b/tests/test-char.c index 2a2ff32904..431577a214 100644 --- a/tests/test-char.c +++ b/tests/test-char.c @@ -347,6 +347,12 @@ static void char_socket_test_common(Chardev *chr) g_assert_cmpint(id, >, 0); main_loop(); + d.chr = chr_client; + id = g_idle_add(char_socket_test_idle, &d); + g_source_set_name_by_id(id, "test-idle"); + g_assert_cmpint(id, >, 0); + main_loop(); + g_assert(object_property_get_bool(OBJECT(chr), "connected", &error_abort)); g_assert(object_property_get_bool(OBJECT(chr_client), "connected", &error_abort)); @@ -356,6 +362,7 @@ static void char_socket_test_common(Chardev *chr) object_unparent(OBJECT(chr_client)); + d.chr = chr; d.conn_expected = false; g_idle_add(char_socket_test_idle, &d); main_loop(); From 5b9d17bb7605da6abe92f4f1c5e40efc0c081eb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Fri, 17 Aug 2018 15:52:24 +0200 Subject: [PATCH 54/80] test-char: add socket reconnect test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This test exhibits a regression fixed by the previous reverts. Signed-off-by: Marc-André Lureau Message-Id: <20180817135224.22971-5-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- tests/test-char.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/test-char.c b/tests/test-char.c index 431577a214..831e37fbf4 100644 --- a/tests/test-char.c +++ b/tests/test-char.c @@ -307,7 +307,7 @@ static int socket_can_read_hello(void *opaque) return 10; } -static void char_socket_test_common(Chardev *chr) +static void char_socket_test_common(Chardev *chr, bool reconnect) { Chardev *chr_client; QObject *addr; @@ -327,7 +327,8 @@ static void char_socket_test_common(Chardev *chr) addr = object_property_get_qobject(OBJECT(chr), "addr", &error_abort); qdict = qobject_to(QDict, addr); port = qdict_get_str(qdict, "port"); - tmp = g_strdup_printf("tcp:127.0.0.1:%s", port); + tmp = g_strdup_printf("tcp:127.0.0.1:%s%s", port, + reconnect ? 
",reconnect=1" : ""); qobject_unref(qdict); qemu_chr_fe_init(&be, chr, &error_abort); @@ -375,7 +376,15 @@ static void char_socket_basic_test(void) { Chardev *chr = qemu_chr_new("server", "tcp:127.0.0.1:0,server,nowait"); - char_socket_test_common(chr); + char_socket_test_common(chr, false); +} + + +static void char_socket_reconnect_test(void) +{ + Chardev *chr = qemu_chr_new("server", "tcp:127.0.0.1:0,server,nowait"); + + char_socket_test_common(chr, true); } @@ -407,7 +416,7 @@ static void char_socket_fdpass_test(void) qemu_opts_del(opts); - char_socket_test_common(chr); + char_socket_test_common(chr, false); } @@ -826,6 +835,7 @@ int main(int argc, char **argv) g_test_add_func("/char/file-fifo", char_file_fifo_test); #endif g_test_add_func("/char/socket/basic", char_socket_basic_test); + g_test_add_func("/char/socket/reconnect", char_socket_reconnect_test); g_test_add_func("/char/socket/fdpass", char_socket_fdpass_test); g_test_add_func("/char/udp", char_udp_test); #ifdef HAVE_CHARDEV_SERIAL From 27e18b8952f8b7a1e26350846f8a0d5a9b33bfb8 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 22 Aug 2018 12:38:20 +0200 Subject: [PATCH 55/80] char-pty: remove unnecessary #ifdef For some reason __APPLE__ was not checked in pty code. However, the #ifdef is redundant: this file is already compiled only if CONFIG_POSIX, same as util/qemu-openpty.c which it uses. Reported-by: Roman Bolshakov Signed-off-by: Paolo Bonzini --- chardev/char-pty.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/chardev/char-pty.c b/chardev/char-pty.c index 68fd4e20c3..e8d9a53476 100644 --- a/chardev/char-pty.c +++ b/chardev/char-pty.c @@ -31,10 +31,6 @@ #include "chardev/char-io.h" -#if defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ - || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \ - || defined(__GLIBC__) - typedef struct { Chardev parent; QIOChannel *ioc; @@ -299,5 +295,3 @@ static void register_types(void) } type_init(register_types); - -#endif From 92d5f1a4147c3722b5e9a8bcfb7dc261b7a8b855 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 21 Aug 2018 15:31:24 +0200 Subject: [PATCH 56/80] target/i386: unify masking of interrupts Interrupt handling depends on various flags in env->hflags or env->hflags2, and the exact detail were not exactly replicated between x86_cpu_has_work and x86_cpu_exec_interrupt. Create a new function that extracts the highest-priority non-masked interrupt, and use it in both functions. 
Signed-off-by: Paolo Bonzini --- target/i386/cpu.c | 51 +++++++++++++++---- target/i386/cpu.h | 1 + target/i386/seg_helper.c | 106 ++++++++++++++++++--------------------- 3 files changed, 91 insertions(+), 67 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index f24295e6e4..c88876dfe3 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -5429,20 +5429,51 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) cpu->env.eip = tb->pc - tb->cs_base; } -static bool x86_cpu_has_work(CPUState *cs) +int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - return ((cs->interrupt_request & (CPU_INTERRUPT_HARD | - CPU_INTERRUPT_POLL)) && - (env->eflags & IF_MASK)) || - (cs->interrupt_request & (CPU_INTERRUPT_NMI | - CPU_INTERRUPT_INIT | - CPU_INTERRUPT_SIPI | - CPU_INTERRUPT_MCE)) || - ((cs->interrupt_request & CPU_INTERRUPT_SMI) && - !(env->hflags & HF_SMM_MASK)); +#if !defined(CONFIG_USER_ONLY) + if (interrupt_request & CPU_INTERRUPT_POLL) { + return CPU_INTERRUPT_POLL; + } +#endif + if (interrupt_request & CPU_INTERRUPT_SIPI) { + return CPU_INTERRUPT_SIPI; + } + + if (env->hflags2 & HF2_GIF_MASK) { + if ((interrupt_request & CPU_INTERRUPT_SMI) && + !(env->hflags & HF_SMM_MASK)) { + return CPU_INTERRUPT_SMI; + } else if ((interrupt_request & CPU_INTERRUPT_NMI) && + !(env->hflags2 & HF2_NMI_MASK)) { + return CPU_INTERRUPT_NMI; + } else if (interrupt_request & CPU_INTERRUPT_MCE) { + return CPU_INTERRUPT_MCE; + } else if ((interrupt_request & CPU_INTERRUPT_HARD) && + (((env->hflags2 & HF2_VINTR_MASK) && + (env->hflags2 & HF2_HIF_MASK)) || + (!(env->hflags2 & HF2_VINTR_MASK) && + (env->eflags & IF_MASK && + !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { + return CPU_INTERRUPT_HARD; +#if !defined(CONFIG_USER_ONLY) + } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && + (env->eflags & IF_MASK) && + !(env->hflags & HF_INHIBIT_IRQ_MASK)) { + return CPU_INTERRUPT_VIRQ; +#endif + } + } + + return 0; +} + +static bool x86_cpu_has_work(CPUState *cs) +{ + return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; } static void x86_disas_set_info(CPUState *cs, disassemble_info *info) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index b572a8e4aa..227a5d9b61 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1485,6 +1485,7 @@ extern struct VMStateDescription vmstate_x86_cpu; */ void x86_cpu_do_interrupt(CPUState *cpu); bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); +int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, int cpuid, void *opaque); diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c index d1cbc6ebf0..ba9cc688ac 100644 --- a/target/i386/seg_helper.c +++ b/target/i386/seg_helper.c @@ -1319,74 +1319,66 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - bool ret = false; + int intno; + interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); + if (!interrupt_request) { + return false; + } + + /* Don't process multiple interrupt requests in a single call. + * This is required to make icount-driven execution deterministic. 
+ */ + switch (interrupt_request) { #if !defined(CONFIG_USER_ONLY) - if (interrupt_request & CPU_INTERRUPT_POLL) { + case CPU_INTERRUPT_POLL: cs->interrupt_request &= ~CPU_INTERRUPT_POLL; apic_poll_irq(cpu->apic_state); - /* Don't process multiple interrupt requests in a single call. - This is required to make icount-driven execution deterministic. */ - return true; - } + break; #endif - if (interrupt_request & CPU_INTERRUPT_SIPI) { + case CPU_INTERRUPT_SIPI: do_cpu_sipi(cpu); - ret = true; - } else if (env->hflags2 & HF2_GIF_MASK) { - if ((interrupt_request & CPU_INTERRUPT_SMI) && - !(env->hflags & HF_SMM_MASK)) { - cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_SMI; - do_smm_enter(cpu); - ret = true; - } else if ((interrupt_request & CPU_INTERRUPT_NMI) && - !(env->hflags2 & HF2_NMI_MASK)) { - cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_NMI; - env->hflags2 |= HF2_NMI_MASK; - do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); - ret = true; - } else if (interrupt_request & CPU_INTERRUPT_MCE) { - cs->interrupt_request &= ~CPU_INTERRUPT_MCE; - do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); - ret = true; - } else if ((interrupt_request & CPU_INTERRUPT_HARD) && - (((env->hflags2 & HF2_VINTR_MASK) && - (env->hflags2 & HF2_HIF_MASK)) || - (!(env->hflags2 & HF2_VINTR_MASK) && - (env->eflags & IF_MASK && - !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { - int intno; - cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); - cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | - CPU_INTERRUPT_VIRQ); - intno = cpu_get_pic_interrupt(env); - qemu_log_mask(CPU_LOG_TB_IN_ASM, - "Servicing hardware INT=0x%02x\n", intno); - do_interrupt_x86_hardirq(env, intno, 1); - /* ensure that no TB jump will be modified as - the program flow was changed */ - ret = true; + break; + case CPU_INTERRUPT_SMI: + cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_SMI; + do_smm_enter(cpu); + break; + case CPU_INTERRUPT_NMI: + cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_NMI; + env->hflags2 |= HF2_NMI_MASK; + do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); + break; + case CPU_INTERRUPT_MCE: + cs->interrupt_request &= ~CPU_INTERRUPT_MCE; + do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); + break; + case CPU_INTERRUPT_HARD: + cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); + cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | + CPU_INTERRUPT_VIRQ); + intno = cpu_get_pic_interrupt(env); + qemu_log_mask(CPU_LOG_TB_IN_ASM, + "Servicing hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + break; #if !defined(CONFIG_USER_ONLY) - } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && - (env->eflags & IF_MASK) && - !(env->hflags & HF_INHIBIT_IRQ_MASK)) { - int intno; - /* FIXME: this should respect TPR */ - cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); - intno = x86_ldl_phys(cs, env->vm_vmcb + case CPU_INTERRUPT_VIRQ: + /* FIXME: this should respect TPR */ + cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); + intno = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); - qemu_log_mask(CPU_LOG_TB_IN_ASM, - "Servicing virtual hardware INT=0x%02x\n", intno); - do_interrupt_x86_hardirq(env, intno, 1); - cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; - ret = true; + qemu_log_mask(CPU_LOG_TB_IN_ASM, + "Servicing virtual hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, 
intno, 1); + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + break; #endif - } } - return ret; + /* Ensure that no TB jump will be modified as the program flow was changed. */ + return true; } void helper_lldt(CPUX86State *env, int selector) From f8dc4c645ec2956a6cd97e0ca0fdd4753181f735 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 21 Aug 2018 09:40:22 +0200 Subject: [PATCH 57/80] target/i386: rename HF_SVMI_MASK to HF_GUEST_MASK This flag will be used for KVM's nested VMX migration; the HF_GUEST_MASK name is already used in KVM, adopt it in QEMU as well. Signed-off-by: Paolo Bonzini --- target/i386/cpu.h | 4 ++-- target/i386/excp_helper.c | 2 +- target/i386/seg_helper.c | 6 +++--- target/i386/svm_helper.c | 6 +++--- target/i386/translate.c | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 227a5d9b61..6f0e4de740 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -171,7 +171,7 @@ typedef enum X86Seg { #define HF_AC_SHIFT 18 /* must be same as eflags */ #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ -#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */ +#define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ @@ -196,7 +196,7 @@ typedef enum X86Seg { #define HF_AC_MASK (1 << HF_AC_SHIFT) #define HF_SMM_MASK (1 << HF_SMM_SHIFT) #define HF_SVME_MASK (1 << HF_SVME_SHIFT) -#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT) +#define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) diff --git a/target/i386/excp_helper.c b/target/i386/excp_helper.c index 37a33d5ae0..49231f6b69 100644 --- a/target/i386/excp_helper.c +++ b/target/i386/excp_helper.c @@ -53,7 +53,7 @@ static int check_exception(CPUX86State *env, int intno, int *error_code, #if !defined(CONFIG_USER_ONLY) if (env->old_exception == EXCP08_DBLE) { - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */ } diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c index ba9cc688ac..33714bc6e1 100644 --- a/target/i386/seg_helper.c +++ b/target/i386/seg_helper.c @@ -1244,7 +1244,7 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, } if (env->cr[0] & CR0_PE_MASK) { #if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { handle_even_inj(env, intno, is_int, error_code, is_hw, 0); } #endif @@ -1259,7 +1259,7 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, } } else { #if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { handle_even_inj(env, intno, is_int, error_code, is_hw, 1); } #endif @@ -1267,7 +1267,7 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, } #if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { CPUState *cs = CPU(cpu); uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c index 342ece082f..9fd22a883b 100644 --- a/target/i386/svm_helper.c +++ b/target/i386/svm_helper.c @@ -228,7 +228,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) } /* 
enable intercepts */ - env->hflags |= HF_SVMI_MASK; + env->hflags |= HF_GUEST_MASK; env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset)); @@ -503,7 +503,7 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, { CPUState *cs = CPU(x86_env_get_cpu(env)); - if (likely(!(env->hflags & HF_SVMI_MASK))) { + if (likely(!(env->hflags & HF_GUEST_MASK))) { return; } switch (type) { @@ -697,7 +697,7 @@ void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) /* Reload the host state from vm_hsave */ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); - env->hflags &= ~HF_SVMI_MASK; + env->hflags &= ~HF_GUEST_MASK; env->intercept = 0; env->intercept_exceptions = 0; cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; diff --git a/target/i386/translate.c b/target/i386/translate.c index b8222dc4ba..8fcd88e326 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -632,7 +632,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, tcg_abort(); } } - if(s->flags & HF_SVMI_MASK) { + if(s->flags & HF_GUEST_MASK) { gen_update_cc_op(s); gen_jmp_im(s, cur_eip); svm_flags |= (1 << (4 + ot)); @@ -2316,7 +2316,7 @@ gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, uint32_t type, uint64_t param) { /* no SVM activated; fast case */ - if (likely(!(s->flags & HF_SVMI_MASK))) + if (likely(!(s->flags & HF_GUEST_MASK))) return; gen_update_cc_op(s); gen_jmp_im(s, pc_start - s->cs_base); From 5b8063c40672f19716705f1342dc32e5030c2f43 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Fri, 14 Sep 2018 03:38:26 +0300 Subject: [PATCH 58/80] i386: Compile CPUX86State xsave_buf only when support KVM or HVF While at it, also rename var to indicate it is not used only in KVM. Reviewed-by: Nikita Leshchenko Reviewed-by: Patrick Colp Signed-off-by: Liran Alon Message-Id: <20180914003827.124570-2-liran.alon@oracle.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.h | 4 +++- target/i386/hvf/README.md | 2 +- target/i386/hvf/hvf.c | 2 +- target/i386/hvf/x86hvf.c | 4 ++-- target/i386/kvm.c | 6 +++--- 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 6f0e4de740..730c06f80a 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1327,7 +1327,9 @@ typedef struct CPUX86State { bool tsc_valid; int64_t tsc_khz; int64_t user_tsc_khz; /* for sanity check only */ - void *kvm_xsave_buf; +#if defined(CONFIG_KVM) || defined(CONFIG_HVF) + void *xsave_buf; +#endif #if defined(CONFIG_HVF) HVFX86EmulatorState *hvf_emul; #endif diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md index 0d27a0d52b..2d33477aca 100644 --- a/target/i386/hvf/README.md +++ b/target/i386/hvf/README.md @@ -2,6 +2,6 @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desktop Hosted Hypervisor) (last known location: https://github.com/veertuinc/vdhh) with some minor changes, the most significant of which were: -1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, kvm_xsave_buf) due to historical differences + QEMU needing to handle more emulation targets. +1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets. 2. Removal of `apic_page` and hyperv-related functionality. 3. 
More relaxed use of `qemu_mutex_lock_iothread`. diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index df69e6d0a7..5db167df98 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -587,7 +587,7 @@ int hvf_init_vcpu(CPUState *cpu) hvf_reset_vcpu(cpu); x86cpu = X86_CPU(cpu); - x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, 4096); + x86cpu->env.xsave_buf = qemu_memalign(4096, 4096); hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1); hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1); diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 6c88939b96..df8e946fbc 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -75,7 +75,7 @@ void hvf_put_xsave(CPUState *cpu_state) struct X86XSaveArea *xsave; - xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf; + xsave = X86_CPU(cpu_state)->env.xsave_buf; x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave); @@ -163,7 +163,7 @@ void hvf_get_xsave(CPUState *cpu_state) { struct X86XSaveArea *xsave; - xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf; + xsave = X86_CPU(cpu_state)->env.xsave_buf; if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) { abort(); diff --git a/target/i386/kvm.c b/target/i386/kvm.c index de892db671..dc4047b02f 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -1189,7 +1189,7 @@ int kvm_arch_init_vcpu(CPUState *cs) } if (has_xsave) { - env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave)); + env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave)); } cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE); @@ -1639,7 +1639,7 @@ ASSERT_OFFSET(XSAVE_PKRU, pkru_state); static int kvm_put_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; - X86XSaveArea *xsave = env->kvm_xsave_buf; + X86XSaveArea *xsave = env->xsave_buf; if (!has_xsave) { return kvm_put_fpu(cpu); @@ -2081,7 +2081,7 @@ static int kvm_get_fpu(X86CPU *cpu) static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; - X86XSaveArea *xsave = env->kvm_xsave_buf; + X86XSaveArea *xsave = env->xsave_buf; int ret; if (!has_xsave) { From 8371158bbaff91234cfba1c401ff523e04785b0f Mon Sep 17 00:00:00 2001 From: Li Qiang Date: Thu, 13 Sep 2018 22:11:11 -0700 Subject: [PATCH 59/80] hw: edu: replace device name with macro Just as other devices do. Signed-off-by: Li Qiang Message-Id: <1536901871-2729-1-git-send-email-liq3ea@gmail.com> Signed-off-by: Paolo Bonzini --- hw/misc/edu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hw/misc/edu.c b/hw/misc/edu.c index df26a4d046..0687ffd343 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -30,7 +30,8 @@ #include "qemu/main-loop.h" /* iothread mutex */ #include "qapi/visitor.h" -#define EDU(obj) OBJECT_CHECK(EduState, obj, "edu") +#define TYPE_PCI_EDU_DEVICE "edu" +#define EDU(obj) OBJECT_CHECK(EduState, obj, TYPE_PCI_EDU_DEVICE) #define FACT_IRQ 0x00000001 #define DMA_IRQ 0x00000100 @@ -414,7 +415,7 @@ static void pci_edu_register_types(void) { }, }; static const TypeInfo edu_info = { - .name = "edu", + .name = TYPE_PCI_EDU_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(EduState), .instance_init = edu_instance_init, From a52fbc37a46691762540b043c4cf5f9e7eb1a244 Mon Sep 17 00:00:00 2001 From: Viktor Prutyanov Date: Wed, 29 Aug 2018 15:41:24 +0300 Subject: [PATCH 60/80] dump: move Windows dump structures definitions This patch moves definitions of Windows dump structures to include/qemu/win_dump_defs.h to keep create_win_dump() prototype separate. 
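One payoff of the split is that standalone tooling can reason about the on-disk layout without pulling in DumpState. A minimal sketch of such a consumer (it mirrors only the first two fields of WinDumpHeader64 instead of including the QEMU header, and assumes the conventional "PAGE"/"DU64" magic of 64-bit Windows crash dumps):

/* Hypothetical standalone checker for the dump signature. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    char magic[8];   /* Signature[4] + ValidDump[4] */
    FILE *f;

    if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
        fprintf(stderr, "usage: %s MEMORY.DMP\n", argv[0]);
        return 1;
    }
    if (fread(magic, 1, sizeof(magic), f) == sizeof(magic) &&
        !memcmp(magic, "PAGE", 4) && !memcmp(magic + 4, "DU64", 4)) {
        puts("looks like a 64-bit Windows crash dump");
    } else {
        puts("not a 64-bit Windows crash dump");
    }
    fclose(f);
    return 0;
}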
Signed-off-by: Viktor Prutyanov Message-Id: <1535546488-30208-2-git-send-email-viktor.prutyanov@virtuozzo.com> Signed-off-by: Paolo Bonzini --- include/qemu/win_dump_defs.h | 179 +++++++++++++++++++++++++++++++++++ win_dump.h | 166 +------------------------------- 2 files changed, 183 insertions(+), 162 deletions(-) create mode 100644 include/qemu/win_dump_defs.h diff --git a/include/qemu/win_dump_defs.h b/include/qemu/win_dump_defs.h new file mode 100644 index 0000000000..145096e8ee --- /dev/null +++ b/include/qemu/win_dump_defs.h @@ -0,0 +1,179 @@ +/* + * Windows crashdump definitions + * + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_WIN_DUMP_DEFS_H +#define QEMU_WIN_DUMP_DEFS_H + +typedef struct WinDumpPhyMemRun64 { + uint64_t BasePage; + uint64_t PageCount; +} QEMU_PACKED WinDumpPhyMemRun64; + +typedef struct WinDumpPhyMemDesc64 { + uint32_t NumberOfRuns; + uint32_t unused; + uint64_t NumberOfPages; + WinDumpPhyMemRun64 Run[43]; +} QEMU_PACKED WinDumpPhyMemDesc64; + +typedef struct WinDumpExceptionRecord { + uint32_t ExceptionCode; + uint32_t ExceptionFlags; + uint64_t ExceptionRecord; + uint64_t ExceptionAddress; + uint32_t NumberParameters; + uint32_t unused; + uint64_t ExceptionInformation[15]; +} QEMU_PACKED WinDumpExceptionRecord; + +typedef struct WinDumpHeader64 { + char Signature[4]; + char ValidDump[4]; + uint32_t MajorVersion; + uint32_t MinorVersion; + uint64_t DirectoryTableBase; + uint64_t PfnDatabase; + uint64_t PsLoadedModuleList; + uint64_t PsActiveProcessHead; + uint32_t MachineImageType; + uint32_t NumberProcessors; + union { + struct { + uint32_t BugcheckCode; + uint32_t unused0; + uint64_t BugcheckParameter1; + uint64_t BugcheckParameter2; + uint64_t BugcheckParameter3; + uint64_t BugcheckParameter4; + }; + uint8_t BugcheckData[40]; + }; + uint8_t VersionUser[32]; + uint64_t KdDebuggerDataBlock; + union { + WinDumpPhyMemDesc64 PhysicalMemoryBlock; + uint8_t PhysicalMemoryBlockBuffer[704]; + }; + union { + uint8_t ContextBuffer[3000]; + }; + WinDumpExceptionRecord Exception; + uint32_t DumpType; + uint32_t unused1; + uint64_t RequiredDumpSpace; + uint64_t SystemTime; + char Comment[128]; + uint64_t SystemUpTime; + uint32_t MiniDumpFields; + uint32_t SecondaryDataState; + uint32_t ProductType; + uint32_t SuiteMask; + uint32_t WriterStatus; + uint8_t unused2; + uint8_t KdSecondaryVersion; + uint8_t reserved[4018]; +} QEMU_PACKED WinDumpHeader64; + +#define KDBG_OWNER_TAG_OFFSET64 0x10 +#define KDBG_MM_PFN_DATABASE_OFFSET64 0xC0 +#define KDBG_KI_BUGCHECK_DATA_OFFSET64 0x88 +#define KDBG_KI_PROCESSOR_BLOCK_OFFSET64 0x218 +#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET64 0x338 + +#define VMCOREINFO_ELF_NOTE_HDR_SIZE 24 + +#define WIN_CTX_X64 0x00100000L + +#define WIN_CTX_CTL 0x00000001L +#define WIN_CTX_INT 0x00000002L +#define WIN_CTX_SEG 0x00000004L +#define WIN_CTX_FP 0x00000008L +#define WIN_CTX_DBG 0x00000010L + +#define WIN_CTX_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP) +#define WIN_CTX_ALL (WIN_CTX_FULL | WIN_CTX_SEG | WIN_CTX_DBG) + +#define LIVE_SYSTEM_DUMP 0x00000161 + +typedef struct WinM128A { + uint64_t low; + int64_t high; +} QEMU_ALIGNED(16) WinM128A; + +typedef struct WinContext { + uint64_t PHome[6]; + + uint32_t ContextFlags; + uint32_t MxCsr; + + uint16_t SegCs; + uint16_t SegDs; + uint16_t SegEs; + uint16_t SegFs; + uint16_t SegGs; + uint16_t SegSs; + uint32_t EFlags; + + 
uint64_t Dr0; + uint64_t Dr1; + uint64_t Dr2; + uint64_t Dr3; + uint64_t Dr6; + uint64_t Dr7; + + uint64_t Rax; + uint64_t Rcx; + uint64_t Rdx; + uint64_t Rbx; + uint64_t Rsp; + uint64_t Rbp; + uint64_t Rsi; + uint64_t Rdi; + uint64_t R8; + uint64_t R9; + uint64_t R10; + uint64_t R11; + uint64_t R12; + uint64_t R13; + uint64_t R14; + uint64_t R15; + + uint64_t Rip; + + struct { + uint16_t ControlWord; + uint16_t StatusWord; + uint8_t TagWord; + uint8_t Reserved1; + uint16_t ErrorOpcode; + uint32_t ErrorOffset; + uint16_t ErrorSelector; + uint16_t Reserved2; + uint32_t DataOffset; + uint16_t DataSelector; + uint16_t Reserved3; + uint32_t MxCsr; + uint32_t MxCsr_Mask; + WinM128A FloatRegisters[8]; + WinM128A XmmRegisters[16]; + uint8_t Reserved4[96]; + } FltSave; + + WinM128A VectorRegister[26]; + uint64_t VectorControl; + + uint64_t DebugControl; + uint64_t LastBranchToRip; + uint64_t LastBranchFromRip; + uint64_t LastExceptionToRip; + uint64_t LastExceptionFromRip; +} QEMU_ALIGNED(16) WinContext; + +#endif /* QEMU_WIN_DUMP_DEFS_H */ diff --git a/win_dump.h b/win_dump.h index f9e1faf8eb..b8c25348f4 100644 --- a/win_dump.h +++ b/win_dump.h @@ -8,169 +8,11 @@ * */ -typedef struct WinDumpPhyMemRun64 { - uint64_t BasePage; - uint64_t PageCount; -} QEMU_PACKED WinDumpPhyMemRun64; +#ifndef WIN_DUMP_H +#define WIN_DUMP_H -typedef struct WinDumpPhyMemDesc64 { - uint32_t NumberOfRuns; - uint32_t unused; - uint64_t NumberOfPages; - WinDumpPhyMemRun64 Run[43]; -} QEMU_PACKED WinDumpPhyMemDesc64; - -typedef struct WinDumpExceptionRecord { - uint32_t ExceptionCode; - uint32_t ExceptionFlags; - uint64_t ExceptionRecord; - uint64_t ExceptionAddress; - uint32_t NumberParameters; - uint32_t unused; - uint64_t ExceptionInformation[15]; -} QEMU_PACKED WinDumpExceptionRecord; - -typedef struct WinDumpHeader64 { - char Signature[4]; - char ValidDump[4]; - uint32_t MajorVersion; - uint32_t MinorVersion; - uint64_t DirectoryTableBase; - uint64_t PfnDatabase; - uint64_t PsLoadedModuleList; - uint64_t PsActiveProcessHead; - uint32_t MachineImageType; - uint32_t NumberProcessors; - union { - struct { - uint32_t BugcheckCode; - uint32_t unused0; - uint64_t BugcheckParameter1; - uint64_t BugcheckParameter2; - uint64_t BugcheckParameter3; - uint64_t BugcheckParameter4; - }; - uint8_t BugcheckData[40]; - }; - uint8_t VersionUser[32]; - uint64_t KdDebuggerDataBlock; - union { - WinDumpPhyMemDesc64 PhysicalMemoryBlock; - uint8_t PhysicalMemoryBlockBuffer[704]; - }; - union { - uint8_t ContextBuffer[3000]; - }; - WinDumpExceptionRecord Exception; - uint32_t DumpType; - uint32_t unused1; - uint64_t RequiredDumpSpace; - uint64_t SystemTime; - char Comment[128]; - uint64_t SystemUpTime; - uint32_t MiniDumpFields; - uint32_t SecondaryDataState; - uint32_t ProductType; - uint32_t SuiteMask; - uint32_t WriterStatus; - uint8_t unused2; - uint8_t KdSecondaryVersion; - uint8_t reserved[4018]; -} QEMU_PACKED WinDumpHeader64; +#include "qemu/win_dump_defs.h" void create_win_dump(DumpState *s, Error **errp); -#define KDBG_OWNER_TAG_OFFSET64 0x10 -#define KDBG_MM_PFN_DATABASE_OFFSET64 0xC0 -#define KDBG_KI_BUGCHECK_DATA_OFFSET64 0x88 -#define KDBG_KI_PROCESSOR_BLOCK_OFFSET64 0x218 -#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET64 0x338 - -#define VMCOREINFO_ELF_NOTE_HDR_SIZE 24 - -#define WIN_CTX_X64 0x00100000L - -#define WIN_CTX_CTL 0x00000001L -#define WIN_CTX_INT 0x00000002L -#define WIN_CTX_SEG 0x00000004L -#define WIN_CTX_FP 0x00000008L -#define WIN_CTX_DBG 0x00000010L - -#define WIN_CTX_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | 
WIN_CTX_FP)
-#define WIN_CTX_ALL (WIN_CTX_FULL | WIN_CTX_SEG | WIN_CTX_DBG)
-
-#define LIVE_SYSTEM_DUMP 0x00000161
-
-typedef struct WinM128A {
-    uint64_t low;
-    int64_t high;
-} QEMU_ALIGNED(16) WinM128A;
-
-typedef struct WinContext {
-    uint64_t PHome[6];
-
-    uint32_t ContextFlags;
-    uint32_t MxCsr;
-
-    uint16_t SegCs;
-    uint16_t SegDs;
-    uint16_t SegEs;
-    uint16_t SegFs;
-    uint16_t SegGs;
-    uint16_t SegSs;
-    uint32_t EFlags;
-
-    uint64_t Dr0;
-    uint64_t Dr1;
-    uint64_t Dr2;
-    uint64_t Dr3;
-    uint64_t Dr6;
-    uint64_t Dr7;
-
-    uint64_t Rax;
-    uint64_t Rcx;
-    uint64_t Rdx;
-    uint64_t Rbx;
-    uint64_t Rsp;
-    uint64_t Rbp;
-    uint64_t Rsi;
-    uint64_t Rdi;
-    uint64_t R8;
-    uint64_t R9;
-    uint64_t R10;
-    uint64_t R11;
-    uint64_t R12;
-    uint64_t R13;
-    uint64_t R14;
-    uint64_t R15;
-
-    uint64_t Rip;
-
-    struct {
-        uint16_t ControlWord;
-        uint16_t StatusWord;
-        uint8_t TagWord;
-        uint8_t Reserved1;
-        uint16_t ErrorOpcode;
-        uint32_t ErrorOffset;
-        uint16_t ErrorSelector;
-        uint16_t Reserved2;
-        uint32_t DataOffset;
-        uint16_t DataSelector;
-        uint16_t Reserved3;
-        uint32_t MxCsr;
-        uint32_t MxCsr_Mask;
-        WinM128A FloatRegisters[8];
-        WinM128A XmmRegisters[16];
-        uint8_t Reserved4[96];
-    } FltSave;
-
-    WinM128A VectorRegister[26];
-    uint64_t VectorControl;
-
-    uint64_t DebugControl;
-    uint64_t LastBranchToRip;
-    uint64_t LastBranchFromRip;
-    uint64_t LastExceptionToRip;
-    uint64_t LastExceptionFromRip;
-} QEMU_ALIGNED(16) WinContext;
+#endif /* WIN_DUMP_H */
From 3fa2d384c245bcee3a9ecfa11f298b76ea4c9d57 Mon Sep 17 00:00:00 2001
From: Viktor Prutyanov
Date: Wed, 29 Aug 2018 15:41:25 +0300
Subject: [PATCH 61/80] contrib: add elf2dmp tool

elf2dmp is a converter from an ELF dump (produced by 'dump-guest-memory')
to the Windows MEMORY.DMP format (also known as a 'Complete Memory Dump')
which can be opened in WinDbg.

This tool can help if the VMCoreInfo device/driver is absent in a Windows
VM and 'dump-guest-memory -w' is not available, but a dump can be created
in ELF format.

The tool works as follows:
1. Determine the system paging root by looking at GS_BASE or
   KERNEL_GS_BASE to locate the PRCB structure, and find the kernel CR3
   nearby if the CR3 from the QEMU CPU state is not suitable.
2. Find an address within the kernel image by dereferencing the first IDT
   entry, then scan virtual memory page by page until the start of the
   kernel image (its 'MZ' header) is found; a condensed sketch of this
   scan follows below.
3. Download a PDB matching the kernel from the Microsoft symbol store,
   and figure out the layout of certain relevant structures necessary for
   the dump.
4. Populate the corresponding structures in the memory image and create
   the appropriate dump header.
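
To make step 2 concrete, here is a condensed sketch of the scan loop,
restating the code in main() below with the same helpers (idt_desc_addr,
va_space_resolve); it is an illustration, not a separate implementation:

    /* Walk down one page at a time from the first IDT handler until the
     * kernel image's DOS header ("MZ", 0x5a4d) is found. */
    uint64_t KernBase = idt_desc_addr(first_idt_desc) & ~(PAGE_SIZE - 1);

    for (; KernBase >= 0xfffff78000000000; KernBase -= PAGE_SIZE) {
        uint16_t *hdr = va_space_resolve(&vs, KernBase);

        if (hdr && *hdr == 0x5a4d) {
            break; /* KernBase now holds the kernel image base */
        }
    }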
Signed-off-by: Viktor Prutyanov Message-Id: <1535546488-30208-3-git-send-email-viktor.prutyanov@virtuozzo.com> Signed-off-by: Paolo Bonzini --- Makefile | 5 + Makefile.objs | 1 + configure | 3 + contrib/elf2dmp/Makefile.objs | 1 + contrib/elf2dmp/addrspace.c | 233 ++++++++++++++ contrib/elf2dmp/addrspace.h | 44 +++ contrib/elf2dmp/download.c | 47 +++ contrib/elf2dmp/download.h | 13 + contrib/elf2dmp/err.h | 13 + contrib/elf2dmp/kdbg.h | 194 +++++++++++ contrib/elf2dmp/main.c | 589 ++++++++++++++++++++++++++++++++++ contrib/elf2dmp/pdb.c | 322 +++++++++++++++++++ contrib/elf2dmp/pdb.h | 241 ++++++++++++++ contrib/elf2dmp/pe.h | 121 +++++++ contrib/elf2dmp/qemu_elf.c | 164 ++++++++++ contrib/elf2dmp/qemu_elf.h | 51 +++ 16 files changed, 2042 insertions(+) create mode 100644 contrib/elf2dmp/Makefile.objs create mode 100644 contrib/elf2dmp/addrspace.c create mode 100644 contrib/elf2dmp/addrspace.h create mode 100644 contrib/elf2dmp/download.c create mode 100644 contrib/elf2dmp/download.h create mode 100644 contrib/elf2dmp/err.h create mode 100644 contrib/elf2dmp/kdbg.h create mode 100644 contrib/elf2dmp/main.c create mode 100644 contrib/elf2dmp/pdb.c create mode 100644 contrib/elf2dmp/pdb.h create mode 100644 contrib/elf2dmp/pe.h create mode 100644 contrib/elf2dmp/qemu_elf.c create mode 100644 contrib/elf2dmp/qemu_elf.h diff --git a/Makefile b/Makefile index 3730092817..1144d6e3ba 100644 --- a/Makefile +++ b/Makefile @@ -415,6 +415,7 @@ dummy := $(call unnest-vars,, \ chardev-obj-y \ util-obj-y \ qga-obj-y \ + elf2dmp-obj-y \ ivshmem-client-obj-y \ ivshmem-server-obj-y \ libvhost-user-obj-y \ @@ -710,6 +711,10 @@ ifneq ($(EXESUF),) qemu-ga: qemu-ga$(EXESUF) $(QGA_VSS_PROVIDER) $(QEMU_GA_MSI) endif +elf2dmp: LIBS = $(CURL_LIBS) +elf2dmp: $(elf2dmp-obj-y) + $(call LINK, $^) + ifdef CONFIG_IVSHMEM ivshmem-client$(EXESUF): $(ivshmem-client-obj-y) $(COMMON_LDADDS) $(call LINK, $^) diff --git a/Makefile.objs b/Makefile.objs index ce9c79235e..1e1ff387d7 100644 --- a/Makefile.objs +++ b/Makefile.objs @@ -186,6 +186,7 @@ qga-vss-dll-obj-y = qga/ ###################################################################### # contrib +elf2dmp-obj-y = contrib/elf2dmp/ ivshmem-client-obj-$(CONFIG_IVSHMEM) = contrib/ivshmem-client/ ivshmem-server-obj-$(CONFIG_IVSHMEM) = contrib/ivshmem-server/ libvhost-user-obj-y = contrib/libvhost-user/ diff --git a/configure b/configure index 4fc1feaa6f..f3d4b799a5 100755 --- a/configure +++ b/configure @@ -5721,6 +5721,9 @@ if test "$want_tools" = "yes" ; then if [ "$ivshmem" = "yes" ]; then tools="ivshmem-client\$(EXESUF) ivshmem-server\$(EXESUF) $tools" fi + if [ "$posix" = "yes" ] && [ "$curl" = "yes" ]; then + tools="elf2dmp $tools" + fi fi if test "$softmmu" = yes ; then if test "$linux" = yes; then diff --git a/contrib/elf2dmp/Makefile.objs b/contrib/elf2dmp/Makefile.objs new file mode 100644 index 0000000000..e3140f58cf --- /dev/null +++ b/contrib/elf2dmp/Makefile.objs @@ -0,0 +1 @@ +elf2dmp-obj-y = main.o addrspace.o download.o pdb.o qemu_elf.o diff --git a/contrib/elf2dmp/addrspace.c b/contrib/elf2dmp/addrspace.c new file mode 100644 index 0000000000..8a76069cb5 --- /dev/null +++ b/contrib/elf2dmp/addrspace.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "addrspace.h"
+
+static struct pa_block *pa_space_find_block(struct pa_space *ps, uint64_t pa)
+{
+    size_t i;
+    for (i = 0; i < ps->block_nr; i++) {
+        if (ps->block[i].paddr <= pa &&
+            pa < ps->block[i].paddr + ps->block[i].size) {
+            return ps->block + i;
+        }
+    }
+
+    return NULL;
+}
+
+static uint8_t *pa_space_resolve(struct pa_space *ps, uint64_t pa)
+{
+    struct pa_block *block = pa_space_find_block(ps, pa);
+
+    if (!block) {
+        return NULL;
+    }
+
+    return block->addr + (pa - block->paddr);
+}
+
+int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
+{
+    Elf64_Half phdr_nr = elf_getphdrnum(qemu_elf->map);
+    Elf64_Phdr *phdr = elf64_getphdr(qemu_elf->map);
+    size_t block_i = 0;
+    size_t i;
+
+    ps->block_nr = 0;
+
+    for (i = 0; i < phdr_nr; i++) {
+        if (phdr[i].p_type == PT_LOAD) {
+            ps->block_nr++;
+        }
+    }
+
+    ps->block = malloc(sizeof(*ps->block) * ps->block_nr);
+    if (!ps->block) {
+        return 1;
+    }
+
+    for (i = 0; i < phdr_nr; i++) {
+        if (phdr[i].p_type == PT_LOAD) {
+            ps->block[block_i] = (struct pa_block) {
+                .addr = (uint8_t *)qemu_elf->map + phdr[i].p_offset,
+                .paddr = phdr[i].p_paddr,
+                .size = phdr[i].p_filesz,
+            };
+            block_i++;
+        }
+    }
+
+    return 0;
+}
+
+void pa_space_destroy(struct pa_space *ps)
+{
+    ps->block_nr = 0;
+    free(ps->block);
+}
+
+void va_space_set_dtb(struct va_space *vs, uint64_t dtb)
+{
+    vs->dtb = dtb & 0x00ffffffffff000;
+}
+
+void va_space_create(struct va_space *vs, struct pa_space *ps, uint64_t dtb)
+{
+    vs->ps = ps;
+    va_space_set_dtb(vs, dtb);
+}
+
+static uint64_t get_pml4e(struct va_space *vs, uint64_t va)
+{
+    uint64_t pa = (vs->dtb & 0xffffffffff000) | ((va & 0xff8000000000) >> 36);
+
+    return *(uint64_t *)pa_space_resolve(vs->ps, pa);
+}
+
+static uint64_t get_pdpi(struct va_space *vs, uint64_t va, uint64_t pml4e)
+{
+    uint64_t pdpte_paddr = (pml4e & 0xffffffffff000) |
+        ((va & 0x7FC0000000) >> 27);
+
+    return *(uint64_t *)pa_space_resolve(vs->ps, pdpte_paddr);
+}
+
+static uint64_t pde_index(uint64_t va)
+{
+    return (va >> 21) & 0x1FF;
+}
+
+static uint64_t pdba_base(uint64_t pdpe)
+{
+    return pdpe & 0xFFFFFFFFFF000;
+}
+
+static uint64_t get_pgd(struct va_space *vs, uint64_t va, uint64_t pdpe)
+{
+    uint64_t pgd_entry = pdba_base(pdpe) + pde_index(va) * 8;
+
+    return *(uint64_t *)pa_space_resolve(vs->ps, pgd_entry);
+}
+
+static uint64_t pte_index(uint64_t va)
+{
+    return (va >> 12) & 0x1FF;
+}
+
+static uint64_t ptba_base(uint64_t pde)
+{
+    return pde & 0xFFFFFFFFFF000;
+}
+
+static uint64_t get_pte(struct va_space *vs, uint64_t va, uint64_t pgd)
+{
+    uint64_t pgd_val = ptba_base(pgd) + pte_index(va) * 8;
+
+    return *(uint64_t *)pa_space_resolve(vs->ps, pgd_val);
+}
+
+static uint64_t get_paddr(uint64_t va, uint64_t pte)
+{
+    return (pte & 0xFFFFFFFFFF000) | (va & 0xFFF);
+}
+
+static bool is_present(uint64_t entry)
+{
+    return entry & 0x1;
+}
+
+static bool page_size_flag(uint64_t entry)
+{
+    return entry & (1 << 7);
+}
+
+static uint64_t get_1GB_paddr(uint64_t va, uint64_t pdpte)
+{
+    return (pdpte & 0xfffffc0000000) | (va & 0x3fffffff);
+}
+
+static uint64_t get_2MB_paddr(uint64_t va, uint64_t pgd_entry)
+{
+    return (pgd_entry & 0xfffffffe00000) | (va & 0x00000001fffff);
+}
+
+static uint64_t va_space_va2pa(struct va_space *vs, uint64_t va)
+{
+    uint64_t pml4e, pdpe, pgd, pte;
+
+    pml4e = get_pml4e(vs, va);
+    if (!is_present(pml4e)) {
+        return INVALID_PA;
+    }
+
+    pdpe = get_pdpi(vs, va, pml4e);
+    if (!is_present(pdpe)) {
+        return INVALID_PA;
+    }
+
+    if (page_size_flag(pdpe)) {
+        return get_1GB_paddr(va, pdpe);
+    }
+
+    pgd = get_pgd(vs, va, pdpe);
+    if (!is_present(pgd)) {
+        return INVALID_PA;
+    }
+
+    if (page_size_flag(pgd)) {
+        return get_2MB_paddr(va, pgd);
+    }
+
+    pte = get_pte(vs, va, pgd);
+    if (!is_present(pte)) {
+        return INVALID_PA;
+    }
+
+    return get_paddr(va, pte);
+}
+
+void *va_space_resolve(struct va_space *vs, uint64_t va)
+{
+    uint64_t pa = va_space_va2pa(vs, va);
+
+    if (pa == INVALID_PA) {
+        return NULL;
+    }
+
+    return pa_space_resolve(vs->ps, pa);
+}
+
+int va_space_rw(struct va_space *vs, uint64_t addr,
+        void *buf, size_t size, int is_write)
+{
+    while (size) {
+        uint64_t page = addr & PFN_MASK;
+        size_t s = (page + PAGE_SIZE) - addr;
+        void *ptr;
+
+        s = (s > size) ? size : s;
+
+        ptr = va_space_resolve(vs, addr);
+        if (!ptr) {
+            return 1;
+        }
+
+        if (is_write) {
+            memcpy(ptr, buf, s);
+        } else {
+            memcpy(buf, ptr, s);
+        }
+
+        size -= s;
+        buf = (uint8_t *)buf + s;
+        addr += s;
+    }
+
+    return 0;
+}
diff --git a/contrib/elf2dmp/addrspace.h b/contrib/elf2dmp/addrspace.h
new file mode 100644
index 0000000000..d87f6a18c6
--- /dev/null
+++ b/contrib/elf2dmp/addrspace.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 Virtuozzo International GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ *
+ */
+
+#ifndef ADDRSPACE_H
+#define ADDRSPACE_H
+
+#include "qemu_elf.h"
+
+#define PAGE_BITS 12
+#define PAGE_SIZE (1ULL << PAGE_BITS)
+#define PFN_MASK (~(PAGE_SIZE - 1))
+
+#define INVALID_PA UINT64_MAX
+
+struct pa_block {
+    uint8_t *addr;
+    uint64_t paddr;
+    uint64_t size;
+};
+
+struct pa_space {
+    size_t block_nr;
+    struct pa_block *block;
+};
+
+struct va_space {
+    uint64_t dtb;
+    struct pa_space *ps;
+};
+
+int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf);
+void pa_space_destroy(struct pa_space *ps);
+
+void va_space_create(struct va_space *vs, struct pa_space *ps, uint64_t dtb);
+void va_space_set_dtb(struct va_space *vs, uint64_t dtb);
+void *va_space_resolve(struct va_space *vs, uint64_t va);
+int va_space_rw(struct va_space *vs, uint64_t addr,
+        void *buf, size_t size, int is_write);
+
+#endif /* ADDRSPACE_H */
diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c
new file mode 100644
index 0000000000..d09e607431
--- /dev/null
+++ b/contrib/elf2dmp/download.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Virtuozzo International GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <curl/curl.h>
+#include "download.h"
+
+int download_url(const char *name, const char *url)
+{
+    int err = 0;
+    FILE *file;
+    CURL *curl = curl_easy_init();
+
+    if (!curl) {
+        return 1;
+    }
+
+    file = fopen(name, "wb");
+    if (!file) {
+        err = 1;
+        goto out_curl;
+    }
+
+    curl_easy_setopt(curl, CURLOPT_URL, url);
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL);
+    curl_easy_setopt(curl, CURLOPT_WRITEDATA, file);
+    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
+    curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
+
+    if (curl_easy_perform(curl) != CURLE_OK) {
+        err = 1;
+        fclose(file);
+        unlink(name);
+        goto out_curl;
+    }
+
+    err = fclose(file);
+
+out_curl:
+    curl_easy_cleanup(curl);
+
+    return err;
+}
diff --git a/contrib/elf2dmp/download.h b/contrib/elf2dmp/download.h
new file mode 100644
index 0000000000..5c274925f7
--- /dev/null
+++ b/contrib/elf2dmp/download.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2018 Virtuozzo International GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * + */ + +#ifndef DOWNLOAD_H +#define DOWNLOAD_H + +int download_url(const char *name, const char *url); + +#endif /* DOWNLOAD_H */ diff --git a/contrib/elf2dmp/err.h b/contrib/elf2dmp/err.h new file mode 100644 index 0000000000..5456bd5a30 --- /dev/null +++ b/contrib/elf2dmp/err.h @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * + */ + +#ifndef ERR_H +#define ERR_H + +#define eprintf(...) fprintf(stderr, __VA_ARGS__) + +#endif /* ERR_H */ diff --git a/contrib/elf2dmp/kdbg.h b/contrib/elf2dmp/kdbg.h new file mode 100644 index 0000000000..851b57c321 --- /dev/null +++ b/contrib/elf2dmp/kdbg.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * + */ + +#ifndef KDBG_H +#define KDBG_H + +typedef struct DBGKD_GET_VERSION64 { + uint16_t MajorVersion; + uint16_t MinorVersion; + uint8_t ProtocolVersion; + uint8_t KdSecondaryVersion; + uint16_t Flags; + uint16_t MachineType; + uint8_t MaxPacketType; + uint8_t MaxStateChange; + uint8_t MaxManipulate; + uint8_t Simulation; + uint16_t Unused[1]; + uint64_t KernBase; + uint64_t PsLoadedModuleList; + uint64_t DebuggerDataList; +} DBGKD_GET_VERSION64; + +typedef struct DBGKD_DEBUG_DATA_HEADER64 { + struct LIST_ENTRY64 { + struct LIST_ENTRY64 *Flink; + struct LIST_ENTRY64 *Blink; + } List; + uint32_t OwnerTag; + uint32_t Size; +} DBGKD_DEBUG_DATA_HEADER64; + +typedef struct KDDEBUGGER_DATA64 { + DBGKD_DEBUG_DATA_HEADER64 Header; + + uint64_t KernBase; + uint64_t BreakpointWithStatus; + uint64_t SavedContext; + uint16_t ThCallbackStack; + uint16_t NextCallback; + uint16_t FramePointer; + uint16_t PaeEnabled:1; + uint64_t KiCallUserMode; + uint64_t KeUserCallbackDispatcher; + uint64_t PsLoadedModuleList; + uint64_t PsActiveProcessHead; + uint64_t PspCidTable; + uint64_t ExpSystemResourcesList; + uint64_t ExpPagedPoolDescriptor; + uint64_t ExpNumberOfPagedPools; + uint64_t KeTimeIncrement; + uint64_t KeBugCheckCallbackListHead; + uint64_t KiBugcheckData; + uint64_t IopErrorLogListHead; + uint64_t ObpRootDirectoryObject; + uint64_t ObpTypeObjectType; + uint64_t MmSystemCacheStart; + uint64_t MmSystemCacheEnd; + uint64_t MmSystemCacheWs; + uint64_t MmPfnDatabase; + uint64_t MmSystemPtesStart; + uint64_t MmSystemPtesEnd; + uint64_t MmSubsectionBase; + uint64_t MmNumberOfPagingFiles; + uint64_t MmLowestPhysicalPage; + uint64_t MmHighestPhysicalPage; + uint64_t MmNumberOfPhysicalPages; + uint64_t MmMaximumNonPagedPoolInBytes; + uint64_t MmNonPagedSystemStart; + uint64_t MmNonPagedPoolStart; + uint64_t MmNonPagedPoolEnd; + uint64_t MmPagedPoolStart; + uint64_t MmPagedPoolEnd; + uint64_t MmPagedPoolInformation; + uint64_t MmPageSize; + uint64_t MmSizeOfPagedPoolInBytes; + uint64_t MmTotalCommitLimit; + uint64_t MmTotalCommittedPages; + uint64_t MmSharedCommit; + uint64_t MmDriverCommit; + uint64_t MmProcessCommit; + uint64_t MmPagedPoolCommit; + uint64_t MmExtendedCommit; + uint64_t MmZeroedPageListHead; + uint64_t MmFreePageListHead; + uint64_t MmStandbyPageListHead; + uint64_t MmModifiedPageListHead; + uint64_t MmModifiedNoWritePageListHead; + uint64_t MmAvailablePages; + uint64_t MmResidentAvailablePages; + uint64_t PoolTrackTable; + uint64_t NonPagedPoolDescriptor; + uint64_t MmHighestUserAddress; + uint64_t MmSystemRangeStart; + uint64_t MmUserProbeAddress; + uint64_t KdPrintCircularBuffer; + uint64_t KdPrintCircularBufferEnd; + uint64_t 
KdPrintWritePointer; + uint64_t KdPrintRolloverCount; + uint64_t MmLoadedUserImageList; + + /* NT 5.1 Addition */ + + uint64_t NtBuildLab; + uint64_t KiNormalSystemCall; + + /* NT 5.0 hotfix addition */ + + uint64_t KiProcessorBlock; + uint64_t MmUnloadedDrivers; + uint64_t MmLastUnloadedDriver; + uint64_t MmTriageActionTaken; + uint64_t MmSpecialPoolTag; + uint64_t KernelVerifier; + uint64_t MmVerifierData; + uint64_t MmAllocatedNonPagedPool; + uint64_t MmPeakCommitment; + uint64_t MmTotalCommitLimitMaximum; + uint64_t CmNtCSDVersion; + + /* NT 5.1 Addition */ + + uint64_t MmPhysicalMemoryBlock; + uint64_t MmSessionBase; + uint64_t MmSessionSize; + uint64_t MmSystemParentTablePage; + + /* Server 2003 addition */ + + uint64_t MmVirtualTranslationBase; + uint16_t OffsetKThreadNextProcessor; + uint16_t OffsetKThreadTeb; + uint16_t OffsetKThreadKernelStack; + uint16_t OffsetKThreadInitialStack; + uint16_t OffsetKThreadApcProcess; + uint16_t OffsetKThreadState; + uint16_t OffsetKThreadBStore; + uint16_t OffsetKThreadBStoreLimit; + uint16_t SizeEProcess; + uint16_t OffsetEprocessPeb; + uint16_t OffsetEprocessParentCID; + uint16_t OffsetEprocessDirectoryTableBase; + uint16_t SizePrcb; + uint16_t OffsetPrcbDpcRoutine; + uint16_t OffsetPrcbCurrentThread; + uint16_t OffsetPrcbMhz; + uint16_t OffsetPrcbCpuType; + uint16_t OffsetPrcbVendorString; + uint16_t OffsetPrcbProcStateContext; + uint16_t OffsetPrcbNumber; + uint16_t SizeEThread; + uint64_t KdPrintCircularBufferPtr; + uint64_t KdPrintBufferSize; + uint64_t KeLoaderBlock; + uint16_t SizePcr; + uint16_t OffsetPcrSelfPcr; + uint16_t OffsetPcrCurrentPrcb; + uint16_t OffsetPcrContainedPrcb; + uint16_t OffsetPcrInitialBStore; + uint16_t OffsetPcrBStoreLimit; + uint16_t OffsetPcrInitialStack; + uint16_t OffsetPcrStackLimit; + uint16_t OffsetPrcbPcrPage; + uint16_t OffsetPrcbProcStateSpecialReg; + uint16_t GdtR0Code; + uint16_t GdtR0Data; + uint16_t GdtR0Pcr; + uint16_t GdtR3Code; + uint16_t GdtR3Data; + uint16_t GdtR3Teb; + uint16_t GdtLdt; + uint16_t GdtTss; + uint16_t Gdt64R3CmCode; + uint16_t Gdt64R3CmTeb; + uint64_t IopNumTriageDumpDataBlocks; + uint64_t IopTriageDumpDataBlocks; + + /* Longhorn addition */ + + uint64_t VfCrashDataBlock; + uint64_t MmBadPagesDetected; + uint64_t MmZeroedPageSingleBitErrorsDetected; + + /* Windows 7 addition */ + + uint64_t EtwpDebuggerData; + uint16_t OffsetPrcbContext; +} KDDEBUGGER_DATA64; + +#endif /* KDBG_H */ diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c new file mode 100644 index 0000000000..9b93dab662 --- /dev/null +++ b/contrib/elf2dmp/main.c @@ -0,0 +1,589 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * + */ + +#include "qemu/osdep.h" +#include "err.h" +#include "addrspace.h" +#include "pe.h" +#include "pdb.h" +#include "kdbg.h" +#include "download.h" +#include "qemu/win_dump_defs.h" + +#define SYM_URL_BASE "https://msdl.microsoft.com/download/symbols/" +#define PDB_NAME "ntkrnlmp.pdb" + +#define INITIAL_MXCSR 0x1f80 + +typedef struct idt_desc { + uint16_t offset1; /* offset bits 0..15 */ + uint16_t selector; + uint8_t ist; + uint8_t type_attr; + uint16_t offset2; /* offset bits 16..31 */ + uint32_t offset3; /* offset bits 32..63 */ + uint32_t rsrvd; +} __attribute__ ((packed)) idt_desc_t; + +static uint64_t idt_desc_addr(idt_desc_t desc) +{ + return (uint64_t)desc.offset1 | ((uint64_t)desc.offset2 << 16) | + ((uint64_t)desc.offset3 << 32); +} + +static const uint64_t SharedUserData = 0xfffff78000000000; + +#define KUSD_OFFSET_SUITE_MASK 0x2d0 +#define KUSD_OFFSET_PRODUCT_TYPE 0x264 + +#define SYM_RESOLVE(base, r, s) ((s = pdb_resolve(base, r, #s)),\ + s ? printf(#s" = 0x%016lx\n", s) : eprintf("Failed to resolve "#s"\n"), s) + +static uint64_t rol(uint64_t x, uint64_t y) +{ + return (x << y) | (x >> (64 - y)); +} + +/* + * Decoding algorithm can be found in Volatility project + */ +static void kdbg_decode(uint64_t *dst, uint64_t *src, size_t size, + uint64_t kwn, uint64_t kwa, uint64_t kdbe) +{ + size_t i; + assert(size % sizeof(uint64_t) == 0); + for (i = 0; i < size / sizeof(uint64_t); i++) { + uint64_t block; + + block = src[i]; + block = rol(block ^ kwn, (uint8_t)kwn); + block = __builtin_bswap64(block ^ kdbe) ^ kwa; + dst[i] = block; + } +} + +static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, + struct va_space *vs, uint64_t KdDebuggerDataBlock) +{ + const char OwnerTag[4] = "KDBG"; + KDDEBUGGER_DATA64 *kdbg = NULL; + DBGKD_DEBUG_DATA_HEADER64 kdbg_hdr; + bool decode = false; + uint64_t kwn, kwa, KdpDataBlockEncoded; + + if (va_space_rw(vs, + KdDebuggerDataBlock + offsetof(KDDEBUGGER_DATA64, Header), + &kdbg_hdr, sizeof(kdbg_hdr), 0)) { + eprintf("Failed to extract KDBG header\n"); + return NULL; + } + + if (memcmp(&kdbg_hdr.OwnerTag, OwnerTag, sizeof(OwnerTag))) { + uint64_t KiWaitNever, KiWaitAlways; + + decode = true; + + if (!SYM_RESOLVE(KernBase, pdb, KiWaitNever) || + !SYM_RESOLVE(KernBase, pdb, KiWaitAlways) || + !SYM_RESOLVE(KernBase, pdb, KdpDataBlockEncoded)) { + return NULL; + } + + if (va_space_rw(vs, KiWaitNever, &kwn, sizeof(kwn), 0) || + va_space_rw(vs, KiWaitAlways, &kwa, sizeof(kwa), 0)) { + return NULL; + } + + printf("[KiWaitNever] = 0x%016lx\n", kwn); + printf("[KiWaitAlways] = 0x%016lx\n", kwa); + + /* + * If KDBG header can be decoded, KDBG size is available + * and entire KDBG can be decoded. 
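+     * (The transform, per kdbg_decode() above: each 64-bit word w is
+     * decoded as bswap64(rol(w ^ KiWaitNever, KiWaitNever & 0xff)
+     * ^ KdpDataBlockEncoded) ^ KiWaitAlways, where KdpDataBlockEncoded
+     * is the resolved address of that symbol.)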
+     */
+    printf("Decoding KDBG header...\n");
+    kdbg_decode((uint64_t *)&kdbg_hdr, (uint64_t *)&kdbg_hdr,
+            sizeof(kdbg_hdr), kwn, kwa, KdpDataBlockEncoded);
+
+    printf("Owner tag is \'%.4s\'\n", (char *)&kdbg_hdr.OwnerTag);
+    if (memcmp(&kdbg_hdr.OwnerTag, OwnerTag, sizeof(OwnerTag))) {
+        eprintf("Failed to decode KDBG header\n");
+        return NULL;
+    }
+    }
+
+    kdbg = malloc(kdbg_hdr.Size);
+    if (!kdbg) {
+        return NULL;
+    }
+
+    if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) {
+        eprintf("Failed to extract entire KDBG\n");
+        free(kdbg);
+        return NULL;
+    }
+
+    if (!decode) {
+        return kdbg;
+    }
+
+    printf("Decoding KdDebuggerDataBlock...\n");
+    kdbg_decode((uint64_t *)kdbg, (uint64_t *)kdbg, kdbg_hdr.Size,
+            kwn, kwa, KdpDataBlockEncoded);
+
+    va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 1);
+
+    return kdbg;
+}
+
+static void win_context_init_from_qemu_cpu_state(WinContext *ctx,
+        QEMUCPUState *s)
+{
+    WinContext win_ctx = (WinContext){
+        .ContextFlags = WIN_CTX_X64 | WIN_CTX_INT | WIN_CTX_SEG | WIN_CTX_CTL,
+        .MxCsr = INITIAL_MXCSR,
+
+        .SegCs = s->cs.selector,
+        .SegSs = s->ss.selector,
+        .SegDs = s->ds.selector,
+        .SegEs = s->es.selector,
+        .SegFs = s->fs.selector,
+        .SegGs = s->gs.selector,
+        .EFlags = (uint32_t)s->rflags,
+
+        .Rax = s->rax,
+        .Rbx = s->rbx,
+        .Rcx = s->rcx,
+        .Rdx = s->rdx,
+        .Rsp = s->rsp,
+        .Rbp = s->rbp,
+        .Rsi = s->rsi,
+        .Rdi = s->rdi,
+        .R8 = s->r8,
+        .R9 = s->r9,
+        .R10 = s->r10,
+        .R11 = s->r11,
+        .R12 = s->r12,
+        .R13 = s->r13,
+        .R14 = s->r14,
+        .R15 = s->r15,
+
+        .Rip = s->rip,
+        .FltSave = {
+            .MxCsr = INITIAL_MXCSR,
+        },
+    };
+
+    *ctx = win_ctx;
+}
+
+/*
+ * Find the paging-structure hierarchy base if the previously set one
+ * doesn't give access to kernel structures
+ */
+static int fix_dtb(struct va_space *vs, QEMU_Elf *qe)
+{
+    /*
+     * Firstly, test the previously set DTB.
+     */
+    if (va_space_resolve(vs, SharedUserData)) {
+        return 0;
+    }
+
+    /*
+     * Secondly, find a CPU which runs the system task.
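+     * (A CPU is taken to be running the system task when is_system()
+     * sees bit 63 of its GS base set, i.e. GS points at kernel space.)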
+ */ + size_t i; + for (i = 0; i < qe->state_nr; i++) { + QEMUCPUState *s = qe->state[i]; + + if (is_system(s)) { + va_space_set_dtb(vs, s->cr[3]); + printf("DTB 0x%016lx has been found from CPU #%zu" + " as system task CR3\n", vs->dtb, i); + return !(va_space_resolve(vs, SharedUserData)); + } + } + + /* + * Thirdly, use KERNEL_GS_BASE from CPU #0 as PRCB address and + * CR3 as [Prcb+0x7000] + */ + if (qe->has_kernel_gs_base) { + QEMUCPUState *s = qe->state[0]; + uint64_t Prcb = s->kernel_gs_base; + uint64_t *cr3 = va_space_resolve(vs, Prcb + 0x7000); + + if (!cr3) { + return 1; + } + + va_space_set_dtb(vs, *cr3); + printf("DirectoryTableBase = 0x%016lx has been found from CPU #0" + " as interrupt handling CR3\n", vs->dtb); + return !(va_space_resolve(vs, SharedUserData)); + } + + return 1; +} + +static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps, + struct va_space *vs, uint64_t KdDebuggerDataBlock, + KDDEBUGGER_DATA64 *kdbg, uint64_t KdVersionBlock, int nr_cpus) +{ + uint32_t *suite_mask = va_space_resolve(vs, SharedUserData + + KUSD_OFFSET_SUITE_MASK); + int32_t *product_type = va_space_resolve(vs, SharedUserData + + KUSD_OFFSET_PRODUCT_TYPE); + DBGKD_GET_VERSION64 kvb; + WinDumpHeader64 h; + size_t i; + + QEMU_BUILD_BUG_ON(KUSD_OFFSET_SUITE_MASK >= PAGE_SIZE); + QEMU_BUILD_BUG_ON(KUSD_OFFSET_PRODUCT_TYPE >= PAGE_SIZE); + + if (!suite_mask || !product_type) { + return 1; + } + + if (va_space_rw(vs, KdVersionBlock, &kvb, sizeof(kvb), 0)) { + eprintf("Failed to extract KdVersionBlock\n"); + return 1; + } + + h = (WinDumpHeader64) { + .Signature = "PAGE", + .ValidDump = "DU64", + .MajorVersion = kvb.MajorVersion, + .MinorVersion = kvb.MinorVersion, + .DirectoryTableBase = vs->dtb, + .PfnDatabase = kdbg->MmPfnDatabase, + .PsLoadedModuleList = kdbg->PsLoadedModuleList, + .PsActiveProcessHead = kdbg->PsActiveProcessHead, + .MachineImageType = kvb.MachineType, + .NumberProcessors = nr_cpus, + .BugcheckCode = LIVE_SYSTEM_DUMP, + .KdDebuggerDataBlock = KdDebuggerDataBlock, + .DumpType = 1, + .Comment = "Hello from elf2dmp!", + .SuiteMask = *suite_mask, + .ProductType = *product_type, + .SecondaryDataState = kvb.KdSecondaryVersion, + .PhysicalMemoryBlock = (WinDumpPhyMemDesc64) { + .NumberOfRuns = ps->block_nr, + }, + .RequiredDumpSpace = sizeof(h), + }; + + for (i = 0; i < ps->block_nr; i++) { + h.PhysicalMemoryBlock.NumberOfPages += ps->block[i].size / PAGE_SIZE; + h.PhysicalMemoryBlock.Run[i] = (WinDumpPhyMemRun64) { + .BasePage = ps->block[i].paddr / PAGE_SIZE, + .PageCount = ps->block[i].size / PAGE_SIZE, + }; + } + + h.RequiredDumpSpace += h.PhysicalMemoryBlock.NumberOfPages << PAGE_BITS; + + *hdr = h; + + return 0; +} + +static int fill_context(KDDEBUGGER_DATA64 *kdbg, + struct va_space *vs, QEMU_Elf *qe) +{ + int i; + for (i = 0; i < qe->state_nr; i++) { + uint64_t Prcb; + uint64_t Context; + WinContext ctx; + QEMUCPUState *s = qe->state[i]; + + if (va_space_rw(vs, kdbg->KiProcessorBlock + sizeof(Prcb) * i, + &Prcb, sizeof(Prcb), 0)) { + eprintf("Failed to read CPU #%d PRCB location\n", i); + return 1; + } + + if (va_space_rw(vs, Prcb + kdbg->OffsetPrcbContext, + &Context, sizeof(Context), 0)) { + eprintf("Failed to read CPU #%d ContextFrame location\n", i); + return 1; + } + + printf("Filling context for CPU #%d...\n", i); + win_context_init_from_qemu_cpu_state(&ctx, s); + + if (va_space_rw(vs, Context, &ctx, sizeof(ctx), 1)) { + eprintf("Failed to fill CPU #%d context\n", i); + return 1; + } + } + + return 0; +} + +static int write_dump(struct pa_space *ps, + 
WinDumpHeader64 *hdr, const char *name) +{ + FILE *dmp_file = fopen(name, "wb"); + size_t i; + + if (!dmp_file) { + eprintf("Failed to open output file \'%s\'\n", name); + return 1; + } + + printf("Writing header to file...\n"); + + if (fwrite(hdr, sizeof(*hdr), 1, dmp_file) != 1) { + eprintf("Failed to write dump header\n"); + fclose(dmp_file); + return 1; + } + + for (i = 0; i < ps->block_nr; i++) { + struct pa_block *b = &ps->block[i]; + + printf("Writing block #%zu/%zu to file...\n", i, ps->block_nr); + if (fwrite(b->addr, b->size, 1, dmp_file) != 1) { + eprintf("Failed to write dump header\n"); + fclose(dmp_file); + return 1; + } + } + + return fclose(dmp_file); +} + +static int pe_get_pdb_symstore_hash(uint64_t base, void *start_addr, + char *hash, struct va_space *vs) +{ + const char e_magic[2] = "MZ"; + const char Signature[4] = "PE\0\0"; + const char sign_rsds[4] = "RSDS"; + IMAGE_DOS_HEADER *dos_hdr = start_addr; + IMAGE_NT_HEADERS64 nt_hdrs; + IMAGE_FILE_HEADER *file_hdr = &nt_hdrs.FileHeader; + IMAGE_OPTIONAL_HEADER64 *opt_hdr = &nt_hdrs.OptionalHeader; + IMAGE_DATA_DIRECTORY *data_dir = nt_hdrs.OptionalHeader.DataDirectory; + IMAGE_DEBUG_DIRECTORY debug_dir; + OMFSignatureRSDS rsds; + char *pdb_name; + size_t pdb_name_sz; + size_t i; + + QEMU_BUILD_BUG_ON(sizeof(*dos_hdr) >= PAGE_SIZE); + + if (memcmp(&dos_hdr->e_magic, e_magic, sizeof(e_magic))) { + return 1; + } + + if (va_space_rw(vs, base + dos_hdr->e_lfanew, + &nt_hdrs, sizeof(nt_hdrs), 0)) { + return 1; + } + + if (memcmp(&nt_hdrs.Signature, Signature, sizeof(Signature)) || + file_hdr->Machine != 0x8664 || opt_hdr->Magic != 0x020b) { + return 1; + } + + printf("Debug Directory RVA = 0x%016x\n", + data_dir[IMAGE_FILE_DEBUG_DIRECTORY].VirtualAddress); + + if (va_space_rw(vs, + base + data_dir[IMAGE_FILE_DEBUG_DIRECTORY].VirtualAddress, + &debug_dir, sizeof(debug_dir), 0)) { + return 1; + } + + if (debug_dir.Type != IMAGE_DEBUG_TYPE_CODEVIEW) { + return 1; + } + + if (va_space_rw(vs, + base + debug_dir.AddressOfRawData, + &rsds, sizeof(rsds), 0)) { + return 1; + } + + printf("CodeView signature is \'%.4s\'\n", rsds.Signature); + + if (memcmp(&rsds.Signature, sign_rsds, sizeof(sign_rsds))) { + return 1; + } + + pdb_name_sz = debug_dir.SizeOfData - sizeof(rsds); + pdb_name = malloc(pdb_name_sz); + if (!pdb_name) { + return 1; + } + + if (va_space_rw(vs, base + debug_dir.AddressOfRawData + + offsetof(OMFSignatureRSDS, name), pdb_name, pdb_name_sz, 0)) { + free(pdb_name); + return 1; + } + + printf("PDB name is \'%s\', \'%s\' expected\n", pdb_name, PDB_NAME); + + if (strcmp(pdb_name, PDB_NAME)) { + eprintf("Unexpected PDB name, it seems the kernel isn't found\n"); + free(pdb_name); + return 1; + } + + free(pdb_name); + + sprintf(hash, "%.08x%.04x%.04x%.02x%.02x", rsds.guid.a, rsds.guid.b, + rsds.guid.c, rsds.guid.d[0], rsds.guid.d[1]); + hash += 20; + for (i = 0; i < 6; i++, hash += 2) { + sprintf(hash, "%.02x", rsds.guid.e[i]); + } + + sprintf(hash, "%.01x", rsds.age); + + return 0; +} + +int main(int argc, char *argv[]) +{ + int err = 0; + QEMU_Elf qemu_elf; + struct pa_space ps; + struct va_space vs; + QEMUCPUState *state; + idt_desc_t first_idt_desc; + uint64_t KernBase; + void *nt_start_addr = NULL; + WinDumpHeader64 header; + char pdb_hash[34]; + char pdb_url[] = SYM_URL_BASE PDB_NAME + "/0123456789ABCDEF0123456789ABCDEFx/" PDB_NAME; + struct pdb_reader pdb; + uint64_t KdDebuggerDataBlock; + KDDEBUGGER_DATA64 *kdbg; + uint64_t KdVersionBlock; + + if (argc != 3) { + eprintf("usage:\n\t%s elf_file dmp_file\n", argv[0]); + 
return 1; + } + + if (QEMU_Elf_init(&qemu_elf, argv[1])) { + eprintf("Failed to initialize QEMU ELF dump\n"); + return 1; + } + + if (pa_space_create(&ps, &qemu_elf)) { + eprintf("Failed to initialize physical address space\n"); + err = 1; + goto out_elf; + } + + state = qemu_elf.state[0]; + printf("CPU #0 CR3 is 0x%016lx\n", state->cr[3]); + + va_space_create(&vs, &ps, state->cr[3]); + if (fix_dtb(&vs, &qemu_elf)) { + eprintf("Failed to find paging base\n"); + err = 1; + goto out_elf; + } + + printf("CPU #0 IDT is at 0x%016lx\n", state->idt.base); + + if (va_space_rw(&vs, state->idt.base, + &first_idt_desc, sizeof(first_idt_desc), 0)) { + eprintf("Failed to get CPU #0 IDT[0]\n"); + err = 1; + goto out_ps; + } + printf("CPU #0 IDT[0] -> 0x%016lx\n", idt_desc_addr(first_idt_desc)); + + KernBase = idt_desc_addr(first_idt_desc) & ~(PAGE_SIZE - 1); + printf("Searching kernel downwards from 0x%16lx...\n", KernBase); + + for (; KernBase >= 0xfffff78000000000; KernBase -= PAGE_SIZE) { + nt_start_addr = va_space_resolve(&vs, KernBase); + if (!nt_start_addr) { + continue; + } + + if (*(uint16_t *)nt_start_addr == 0x5a4d) { /* MZ */ + break; + } + } + + printf("KernBase = 0x%16lx, signature is \'%.2s\'\n", KernBase, + (char *)nt_start_addr); + + if (pe_get_pdb_symstore_hash(KernBase, nt_start_addr, pdb_hash, &vs)) { + eprintf("Failed to get PDB symbol store hash\n"); + err = 1; + goto out_ps; + } + + sprintf(pdb_url, "%s%s/%s/%s", SYM_URL_BASE, PDB_NAME, pdb_hash, PDB_NAME); + printf("PDB URL is %s\n", pdb_url); + + if (download_url(PDB_NAME, pdb_url)) { + eprintf("Failed to download PDB file\n"); + err = 1; + goto out_ps; + } + + if (pdb_init_from_file(PDB_NAME, &pdb)) { + eprintf("Failed to initialize PDB reader\n"); + err = 1; + goto out_pdb_file; + } + + if (!SYM_RESOLVE(KernBase, &pdb, KdDebuggerDataBlock) || + !SYM_RESOLVE(KernBase, &pdb, KdVersionBlock)) { + err = 1; + goto out_pdb; + } + + kdbg = get_kdbg(KernBase, &pdb, &vs, KdDebuggerDataBlock); + if (!kdbg) { + err = 1; + goto out_pdb; + } + + if (fill_header(&header, &ps, &vs, KdDebuggerDataBlock, kdbg, + KdVersionBlock, qemu_elf.state_nr)) { + err = 1; + goto out_pdb; + } + + if (fill_context(kdbg, &vs, &qemu_elf)) { + err = 1; + goto out_pdb; + } + + if (write_dump(&ps, &header, argv[2])) { + eprintf("Failed to save dump\n"); + err = 1; + goto out_kdbg; + } + +out_kdbg: + free(kdbg); +out_pdb: + pdb_exit(&pdb); +out_pdb_file: + unlink(PDB_NAME); +out_ps: + pa_space_destroy(&ps); +out_elf: + QEMU_Elf_exit(&qemu_elf); + + return err; +} diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c new file mode 100644 index 0000000000..bcb01b414f --- /dev/null +++ b/contrib/elf2dmp/pdb.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * Based on source of Wine project + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "qemu/osdep.h" +#include "pdb.h" +#include "err.h" + +static uint32_t pdb_get_file_size(const struct pdb_reader *r, unsigned idx) +{ + return r->ds.toc->file_size[idx]; +} + +static pdb_seg *get_seg_by_num(struct pdb_reader *r, size_t n) +{ + size_t i = 0; + char *ptr; + + for (ptr = r->segs; (ptr < r->segs + r->segs_size); ) { + i++; + ptr += 8; + if (i == n) { + break; + } + ptr += sizeof(pdb_seg); + } + + return (pdb_seg *)ptr; +} + +uint64_t pdb_find_public_v3_symbol(struct pdb_reader *r, const char *name) +{ + size_t size = pdb_get_file_size(r, r->symbols->gsym_file); + int length; + const union codeview_symbol *sym; + const uint8_t *root = r->modimage; + size_t i; + + for (i = 0; i < size; i += length) { + sym = (const void *)(root + i); + length = sym->generic.len + 2; + + if (!sym->generic.id || length < 4) { + break; + } + + if (sym->generic.id == S_PUB_V3 && + !strcmp(name, sym->public_v3.name)) { + pdb_seg *segment = get_seg_by_num(r, sym->public_v3.segment); + uint32_t sect_rva = segment->dword[1]; + uint64_t rva = sect_rva + sym->public_v3.offset; + + printf("%s: 0x%016x(%d:\'%.8s\') + 0x%08x = 0x%09lx\n", name, + sect_rva, sym->public_v3.segment, + ((char *)segment - 8), sym->public_v3.offset, rva); + return rva; + } + } + + return 0; +} + +uint64_t pdb_resolve(uint64_t img_base, struct pdb_reader *r, const char *name) +{ + uint64_t rva = pdb_find_public_v3_symbol(r, name); + + if (!rva) { + return 0; + } + + return img_base + rva; +} + +static void pdb_reader_ds_exit(struct pdb_reader *r) +{ + free(r->ds.toc); +} + +static void pdb_exit_symbols(struct pdb_reader *r) +{ + free(r->modimage); + free(r->symbols); +} + +static void pdb_exit_segments(struct pdb_reader *r) +{ + free(r->segs); +} + +static void *pdb_ds_read(const PDB_DS_HEADER *header, + const uint32_t *block_list, int size) +{ + int i, nBlocks; + uint8_t *buffer; + + if (!size) { + return NULL; + } + + nBlocks = (size + header->block_size - 1) / header->block_size; + + buffer = malloc(nBlocks * header->block_size); + if (!buffer) { + return NULL; + } + + for (i = 0; i < nBlocks; i++) { + memcpy(buffer + i * header->block_size, (const char *)header + + block_list[i] * header->block_size, header->block_size); + } + + return buffer; +} + +static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number) +{ + const uint32_t *block_list; + uint32_t block_size; + const uint32_t *file_size; + size_t i; + + if (!r->ds.toc || file_number >= r->ds.toc->num_files) { + return NULL; + } + + file_size = r->ds.toc->file_size; + r->file_used[file_number / 32] |= 1 << (file_number % 32); + + if (file_size[file_number] == 0 || file_size[file_number] == 0xFFFFFFFF) { + return NULL; + } + + block_list = file_size + r->ds.toc->num_files; + block_size = r->ds.header->block_size; + + for (i = 0; i < file_number; i++) { + block_list += (file_size[i] + block_size - 1) / block_size; + } + + return pdb_ds_read(r->ds.header, block_list, file_size[file_number]); +} + +static int pdb_init_segments(struct pdb_reader *r) +{ + char *segs; + unsigned stream_idx = r->sidx.segments; + + segs = pdb_ds_read_file(r, stream_idx); + if (!segs) { + return 1; + } + + r->segs = segs; + r->segs_size = pdb_get_file_size(r, stream_idx); + + return 0; +} + +static int pdb_init_symbols(struct pdb_reader *r) +{ + int err 
= 0; + PDB_SYMBOLS *symbols; + PDB_STREAM_INDEXES *sidx = &r->sidx; + + memset(sidx, -1, sizeof(*sidx)); + + symbols = pdb_ds_read_file(r, 3); + if (!symbols) { + return 1; + } + + r->symbols = symbols; + + if (symbols->stream_index_size != sizeof(PDB_STREAM_INDEXES)) { + err = 1; + goto out_symbols; + } + + memcpy(sidx, (const char *)symbols + sizeof(PDB_SYMBOLS) + + symbols->module_size + symbols->offset_size + + symbols->hash_size + symbols->srcmodule_size + + symbols->pdbimport_size + symbols->unknown2_size, sizeof(*sidx)); + + /* Read global symbol table */ + r->modimage = pdb_ds_read_file(r, symbols->gsym_file); + if (!r->modimage) { + err = 1; + goto out_symbols; + } + + return 0; + +out_symbols: + free(symbols); + + return err; +} + +static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr) +{ + memset(r->file_used, 0, sizeof(r->file_used)); + r->ds.header = hdr; + r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr + + hdr->toc_page * hdr->block_size), hdr->toc_size); + + if (!r->ds.toc) { + return 1; + } + + return 0; +} + +static int pdb_reader_init(struct pdb_reader *r, void *data) +{ + int err = 0; + const char pdb7[] = "Microsoft C/C++ MSF 7.00"; + + if (memcmp(data, pdb7, sizeof(pdb7) - 1)) { + return 1; + } + + if (pdb_reader_ds_init(r, data)) { + return 1; + } + + r->ds.root = pdb_ds_read_file(r, 1); + if (!r->ds.root) { + err = 1; + goto out_ds; + } + + if (pdb_init_symbols(r)) { + err = 1; + goto out_root; + } + + if (pdb_init_segments(r)) { + err = 1; + goto out_sym; + } + + return 0; + +out_sym: + pdb_exit_symbols(r); +out_root: + free(r->ds.root); +out_ds: + pdb_reader_ds_exit(r); + + return err; +} + +static void pdb_reader_exit(struct pdb_reader *r) +{ + pdb_exit_segments(r); + pdb_exit_symbols(r); + free(r->ds.root); + pdb_reader_ds_exit(r); +} + +int pdb_init_from_file(const char *name, struct pdb_reader *reader) +{ + int err = 0; + int fd; + void *map; + struct stat st; + + fd = open(name, O_RDONLY, 0); + if (fd == -1) { + eprintf("Failed to open PDB file \'%s\'\n", name); + return 1; + } + reader->fd = fd; + + fstat(fd, &st); + reader->file_size = st.st_size; + + map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (map == MAP_FAILED) { + eprintf("Failed to map PDB file\n"); + err = 1; + goto out_fd; + } + + if (pdb_reader_init(reader, map)) { + err = 1; + goto out_unmap; + } + + return 0; + +out_unmap: + munmap(map, st.st_size); +out_fd: + close(fd); + + return err; +} + +void pdb_exit(struct pdb_reader *reader) +{ + munmap(reader->ds.header, reader->file_size); + close(reader->fd); + pdb_reader_exit(reader); +} diff --git a/contrib/elf2dmp/pdb.h b/contrib/elf2dmp/pdb.h new file mode 100644 index 0000000000..4351a2dd61 --- /dev/null +++ b/contrib/elf2dmp/pdb.h @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ *
+ */
+
+#ifndef PDB_H
+#define PDB_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+typedef struct GUID {
+    unsigned int Data1;
+    unsigned short Data2;
+    unsigned short Data3;
+    unsigned char Data4[8];
+} GUID;
+
+struct PDB_FILE {
+    uint32_t size;
+    uint32_t unknown;
+};
+
+typedef struct PDB_DS_HEADER {
+    char signature[32];
+    uint32_t block_size;
+    uint32_t unknown1;
+    uint32_t num_pages;
+    uint32_t toc_size;
+    uint32_t unknown2;
+    uint32_t toc_page;
+} PDB_DS_HEADER;
+
+typedef struct PDB_DS_TOC {
+    uint32_t num_files;
+    uint32_t file_size[1];
+} PDB_DS_TOC;
+
+typedef struct PDB_DS_ROOT {
+    uint32_t Version;
+    uint32_t TimeDateStamp;
+    uint32_t Age;
+    GUID guid;
+    uint32_t cbNames;
+    char names[1];
+} PDB_DS_ROOT;
+
+typedef struct PDB_TYPES_OLD {
+    uint32_t version;
+    uint16_t first_index;
+    uint16_t last_index;
+    uint32_t type_size;
+    uint16_t file;
+    uint16_t pad;
+} PDB_TYPES_OLD;
+
+typedef struct PDB_TYPES {
+    uint32_t version;
+    uint32_t type_offset;
+    uint32_t first_index;
+    uint32_t last_index;
+    uint32_t type_size;
+    uint16_t file;
+    uint16_t pad;
+    uint32_t hash_size;
+    uint32_t hash_base;
+    uint32_t hash_offset;
+    uint32_t hash_len;
+    uint32_t search_offset;
+    uint32_t search_len;
+    uint32_t unknown_offset;
+    uint32_t unknown_len;
+} PDB_TYPES;
+
+typedef struct PDB_SYMBOL_RANGE {
+    uint16_t segment;
+    uint16_t pad1;
+    uint32_t offset;
+    uint32_t size;
+    uint32_t characteristics;
+    uint16_t index;
+    uint16_t pad2;
+} PDB_SYMBOL_RANGE;
+
+typedef struct PDB_SYMBOL_RANGE_EX {
+    uint16_t segment;
+    uint16_t pad1;
+    uint32_t offset;
+    uint32_t size;
+    uint32_t characteristics;
+    uint16_t index;
+    uint16_t pad2;
+    uint32_t timestamp;
+    uint32_t unknown;
+} PDB_SYMBOL_RANGE_EX;
+
+typedef struct PDB_SYMBOL_FILE {
+    uint32_t unknown1;
+    PDB_SYMBOL_RANGE range;
+    uint16_t flag;
+    uint16_t file;
+    uint32_t symbol_size;
+    uint32_t lineno_size;
+    uint32_t unknown2;
+    uint32_t nSrcFiles;
+    uint32_t attribute;
+    char filename[1];
+} PDB_SYMBOL_FILE;
+
+typedef struct PDB_SYMBOL_FILE_EX {
+    uint32_t unknown1;
+    PDB_SYMBOL_RANGE_EX range;
+    uint16_t flag;
+    uint16_t file;
+    uint32_t symbol_size;
+    uint32_t lineno_size;
+    uint32_t unknown2;
+    uint32_t nSrcFiles;
+    uint32_t attribute;
+    uint32_t reserved[2];
+    char filename[1];
+} PDB_SYMBOL_FILE_EX;
+
+typedef struct PDB_SYMBOL_SOURCE {
+    uint16_t nModules;
+    uint16_t nSrcFiles;
+    uint16_t table[1];
+} PDB_SYMBOL_SOURCE;
+
+typedef struct PDB_SYMBOL_IMPORT {
+    uint32_t unknown1;
+    uint32_t unknown2;
+    uint32_t TimeDateStamp;
+    uint32_t Age;
+    char filename[1];
+} PDB_SYMBOL_IMPORT;
+
+typedef struct PDB_SYMBOLS_OLD {
+    uint16_t hash1_file;
+    uint16_t hash2_file;
+    uint16_t gsym_file;
+    uint16_t pad;
+    uint32_t module_size;
+    uint32_t offset_size;
+    uint32_t hash_size;
+    uint32_t srcmodule_size;
+} PDB_SYMBOLS_OLD;
+
+typedef struct PDB_SYMBOLS {
+    uint32_t signature;
+    uint32_t version;
+    uint32_t unknown;
+    uint32_t hash1_file;
+    uint32_t hash2_file;
+    uint16_t gsym_file;
+    uint16_t unknown1;
+    uint32_t module_size;
+    uint32_t offset_size;
+    uint32_t hash_size;
+    uint32_t srcmodule_size;
+    uint32_t pdbimport_size;
+    uint32_t resvd0;
+    uint32_t stream_index_size;
+    uint32_t unknown2_size;
+    uint16_t resvd3;
+    uint16_t machine;
+    uint32_t resvd4;
+} PDB_SYMBOLS;
+
+typedef struct {
+    uint16_t FPO;
+    uint16_t unk0;
+    uint16_t unk1;
+    uint16_t unk2;
+    uint16_t unk3;
+    uint16_t segments;
+} PDB_STREAM_INDEXES_OLD;
+
+typedef struct {
+    uint16_t FPO;
+    uint16_t unk0;
+    uint16_t unk1;
+    uint16_t unk2;
+    uint16_t unk3;
+    uint16_t segments;
+    uint16_t unk4;
+    uint16_t unk5;
+    uint16_t unk6;
+    uint16_t FPO_EXT;
+    uint16_t unk7;
+} PDB_STREAM_INDEXES;
+
+union codeview_symbol {
+    struct {
+        int16_t len;
+        int16_t id;
+    } generic;
+
+    struct {
+        int16_t len;
+        int16_t id;
+        uint32_t symtype;
+        uint32_t offset;
+        uint16_t segment;
+        char name[1];
+    } public_v3;
+};
+
+#define S_PUB_V3 0x110E
+
+typedef struct pdb_seg {
+    uint32_t dword[8];
+} __attribute__ ((packed)) pdb_seg;
+
+#define IMAGE_FILE_MACHINE_I386 0x014c
+#define IMAGE_FILE_MACHINE_AMD64 0x8664
+
+struct pdb_reader {
+    int fd;
+    size_t file_size;
+    struct {
+        PDB_DS_HEADER *header;
+        PDB_DS_TOC *toc;
+        PDB_DS_ROOT *root;
+    } ds;
+    uint32_t file_used[1024];
+    PDB_SYMBOLS *symbols;
+    PDB_STREAM_INDEXES sidx;
+    uint8_t *modimage;
+    char *segs;
+    size_t segs_size;
+};
+
+int pdb_init_from_file(const char *name, struct pdb_reader *reader);
+void pdb_exit(struct pdb_reader *reader);
+uint64_t pdb_resolve(uint64_t img_base, struct pdb_reader *r, const char *name);
+uint64_t pdb_find_public_v3_symbol(struct pdb_reader *reader, const char *name);
+
+#endif /* PDB_H */
diff --git a/contrib/elf2dmp/pe.h b/contrib/elf2dmp/pe.h
new file mode 100644
index 0000000000..374e06a9c5
--- /dev/null
+++ b/contrib/elf2dmp/pe.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Virtuozzo International GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ *
+ */
+
+#ifndef PE_H
+#define PE_H
+
+#include <stdint.h>
+
+typedef struct IMAGE_DOS_HEADER {
+    uint16_t  e_magic;      /* 0x00: MZ Header signature */
+    uint16_t  e_cblp;       /* 0x02: Bytes on last page of file */
+    uint16_t  e_cp;         /* 0x04: Pages in file */
+    uint16_t  e_crlc;       /* 0x06: Relocations */
+    uint16_t  e_cparhdr;    /* 0x08: Size of header in paragraphs */
+    uint16_t  e_minalloc;   /* 0x0a: Minimum extra paragraphs needed */
+    uint16_t  e_maxalloc;   /* 0x0c: Maximum extra paragraphs needed */
+    uint16_t  e_ss;         /* 0x0e: Initial (relative) SS value */
+    uint16_t  e_sp;         /* 0x10: Initial SP value */
+    uint16_t  e_csum;       /* 0x12: Checksum */
+    uint16_t  e_ip;         /* 0x14: Initial IP value */
+    uint16_t  e_cs;         /* 0x16: Initial (relative) CS value */
+    uint16_t  e_lfarlc;     /* 0x18: File address of relocation table */
+    uint16_t  e_ovno;       /* 0x1a: Overlay number */
+    uint16_t  e_res[4];     /* 0x1c: Reserved words */
+    uint16_t  e_oemid;      /* 0x24: OEM identifier (for e_oeminfo) */
+    uint16_t  e_oeminfo;    /* 0x26: OEM information; e_oemid specific */
+    uint16_t  e_res2[10];   /* 0x28: Reserved words */
+    uint32_t  e_lfanew;     /* 0x3c: Offset to extended header */
+} __attribute__ ((packed)) IMAGE_DOS_HEADER;
+
+typedef struct IMAGE_FILE_HEADER {
+    uint16_t  Machine;
+    uint16_t  NumberOfSections;
+    uint32_t  TimeDateStamp;
+    uint32_t  PointerToSymbolTable;
+    uint32_t  NumberOfSymbols;
+    uint16_t  SizeOfOptionalHeader;
+    uint16_t  Characteristics;
+} __attribute__ ((packed)) IMAGE_FILE_HEADER;
+
+typedef struct IMAGE_DATA_DIRECTORY {
+    uint32_t VirtualAddress;
+    uint32_t Size;
+} __attribute__ ((packed)) IMAGE_DATA_DIRECTORY;
+
+#define IMAGE_NUMBEROF_DIRECTORY_ENTRIES 16
+
+typedef struct IMAGE_OPTIONAL_HEADER64 {
+    uint16_t  Magic; /* 0x20b */
+    uint8_t   MajorLinkerVersion;
+    uint8_t   MinorLinkerVersion;
+    uint32_t  SizeOfCode;
+    uint32_t  SizeOfInitializedData;
+    uint32_t  SizeOfUninitializedData;
+    uint32_t  AddressOfEntryPoint;
+    uint32_t  BaseOfCode;
+    uint64_t  ImageBase;
+    uint32_t  SectionAlignment;
+    uint32_t  FileAlignment;
+    uint16_t  MajorOperatingSystemVersion;
+    uint16_t  MinorOperatingSystemVersion;
+    uint16_t  MajorImageVersion;
+    uint16_t  MinorImageVersion;
+    uint16_t  MajorSubsystemVersion;
+ uint16_t MinorSubsystemVersion; + uint32_t Win32VersionValue; + uint32_t SizeOfImage; + uint32_t SizeOfHeaders; + uint32_t CheckSum; + uint16_t Subsystem; + uint16_t DllCharacteristics; + uint64_t SizeOfStackReserve; + uint64_t SizeOfStackCommit; + uint64_t SizeOfHeapReserve; + uint64_t SizeOfHeapCommit; + uint32_t LoaderFlags; + uint32_t NumberOfRvaAndSizes; + IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES]; +} __attribute__ ((packed)) IMAGE_OPTIONAL_HEADER64; + +typedef struct IMAGE_NT_HEADERS64 { + uint32_t Signature; + IMAGE_FILE_HEADER FileHeader; + IMAGE_OPTIONAL_HEADER64 OptionalHeader; +} __attribute__ ((packed)) IMAGE_NT_HEADERS64; + +#define IMAGE_FILE_DEBUG_DIRECTORY 6 + +typedef struct IMAGE_DEBUG_DIRECTORY { + uint32_t Characteristics; + uint32_t TimeDateStamp; + uint16_t MajorVersion; + uint16_t MinorVersion; + uint32_t Type; + uint32_t SizeOfData; + uint32_t AddressOfRawData; + uint32_t PointerToRawData; +} __attribute__ ((packed)) IMAGE_DEBUG_DIRECTORY; + +#define IMAGE_DEBUG_TYPE_CODEVIEW 2 + +typedef struct guid_t { + uint32_t a; + uint16_t b; + uint16_t c; + uint8_t d[2]; + uint8_t e[6]; +} __attribute__ ((packed)) guid_t; + +typedef struct OMFSignatureRSDS { + char Signature[4]; + guid_t guid; + uint32_t age; + char name[]; +} __attribute__ ((packed)) OMFSignatureRSDS; + +#endif /* PE_H */ diff --git a/contrib/elf2dmp/qemu_elf.c b/contrib/elf2dmp/qemu_elf.c new file mode 100644 index 0000000000..e9c0d2534a --- /dev/null +++ b/contrib/elf2dmp/qemu_elf.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * + */ + +#include "qemu/osdep.h" +#include "err.h" +#include "qemu_elf.h" + +#define QEMU_NOTE_NAME "QEMU" + +#ifndef ROUND_UP +#define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? 
(n) : (d))) +#endif + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#endif + +#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ + ((DIV_ROUND_UP((hdr_size), 4) + \ + DIV_ROUND_UP((name_size), 4) + \ + DIV_ROUND_UP((desc_size), 4)) * 4) + +int is_system(QEMUCPUState *s) +{ + return s->gs.base >> 63; +} + +static char *nhdr_get_name(Elf64_Nhdr *nhdr) +{ + return (char *)nhdr + ROUND_UP(sizeof(*nhdr), 4); +} + +static void *nhdr_get_desc(Elf64_Nhdr *nhdr) +{ + return nhdr_get_name(nhdr) + ROUND_UP(nhdr->n_namesz, 4); +} + +static Elf64_Nhdr *nhdr_get_next(Elf64_Nhdr *nhdr) +{ + return (void *)((uint8_t *)nhdr + ELF_NOTE_SIZE(sizeof(*nhdr), + nhdr->n_namesz, nhdr->n_descsz)); +} + +Elf64_Phdr *elf64_getphdr(void *map) +{ + Elf64_Ehdr *ehdr = map; + Elf64_Phdr *phdr = (void *)((uint8_t *)map + ehdr->e_phoff); + + return phdr; +} + +Elf64_Half elf_getphdrnum(void *map) +{ + Elf64_Ehdr *ehdr = map; + + return ehdr->e_phnum; +} + +static int init_states(QEMU_Elf *qe) +{ + Elf64_Phdr *phdr = elf64_getphdr(qe->map); + Elf64_Nhdr *start = (void *)((uint8_t *)qe->map + phdr[0].p_offset); + Elf64_Nhdr *end = (void *)((uint8_t *)start + phdr[0].p_memsz); + Elf64_Nhdr *nhdr; + size_t cpu_nr = 0; + + if (phdr[0].p_type != PT_NOTE) { + eprintf("Failed to find PT_NOTE\n"); + return 1; + } + + qe->has_kernel_gs_base = 1; + + for (nhdr = start; nhdr < end; nhdr = nhdr_get_next(nhdr)) { + if (!strcmp(nhdr_get_name(nhdr), QEMU_NOTE_NAME)) { + QEMUCPUState *state = nhdr_get_desc(nhdr); + + if (state->size < sizeof(*state)) { + eprintf("CPU #%zu: QEMU CPU state size %u doesn't match\n", + cpu_nr, state->size); + /* + * We assume either every QEMU CPU state has KERNEL_GS_BASE or + * no one has. + */ + qe->has_kernel_gs_base = 0; + } + cpu_nr++; + } + } + + printf("%zu CPU states has been found\n", cpu_nr); + + qe->state = malloc(sizeof(*qe->state) * cpu_nr); + if (!qe->state) { + return 1; + } + + cpu_nr = 0; + + for (nhdr = start; nhdr < end; nhdr = nhdr_get_next(nhdr)) { + if (!strcmp(nhdr_get_name(nhdr), QEMU_NOTE_NAME)) { + qe->state[cpu_nr] = nhdr_get_desc(nhdr); + cpu_nr++; + } + } + + qe->state_nr = cpu_nr; + + return 0; +} + +static void exit_states(QEMU_Elf *qe) +{ + free(qe->state); +} + +int QEMU_Elf_init(QEMU_Elf *qe, const char *filename) +{ + int err = 0; + struct stat st; + + qe->fd = open(filename, O_RDONLY, 0); + if (qe->fd == -1) { + eprintf("Failed to open ELF dump file \'%s\'\n", filename); + return 1; + } + + fstat(qe->fd, &st); + qe->size = st.st_size; + + qe->map = mmap(NULL, qe->size, PROT_READ | PROT_WRITE, + MAP_PRIVATE, qe->fd, 0); + if (qe->map == MAP_FAILED) { + eprintf("Failed to map ELF file\n"); + err = 1; + goto out_fd; + } + + if (init_states(qe)) { + eprintf("Failed to extract QEMU CPU states\n"); + err = 1; + goto out_unmap; + } + + return 0; + +out_unmap: + munmap(qe->map, qe->size); +out_fd: + close(qe->fd); + + return err; +} + +void QEMU_Elf_exit(QEMU_Elf *qe) +{ + exit_states(qe); + munmap(qe->map, qe->size); + close(qe->fd); +} diff --git a/contrib/elf2dmp/qemu_elf.h b/contrib/elf2dmp/qemu_elf.h new file mode 100644 index 0000000000..d85d6558fa --- /dev/null +++ b/contrib/elf2dmp/qemu_elf.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018 Virtuozzo International GmbH + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ *
+ */
+
+#ifndef QEMU_ELF_H
+#define QEMU_ELF_H
+
+#include <stdint.h>
+#include <elf.h>
+
+typedef struct QEMUCPUSegment {
+    uint32_t selector;
+    uint32_t limit;
+    uint32_t flags;
+    uint32_t pad;
+    uint64_t base;
+} QEMUCPUSegment;
+
+typedef struct QEMUCPUState {
+    uint32_t version;
+    uint32_t size;
+    uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp;
+    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
+    uint64_t rip, rflags;
+    QEMUCPUSegment cs, ds, es, fs, gs, ss;
+    QEMUCPUSegment ldt, tr, gdt, idt;
+    uint64_t cr[5];
+    uint64_t kernel_gs_base;
+} QEMUCPUState;
+
+int is_system(QEMUCPUState *s);
+
+typedef struct QEMU_Elf {
+    int fd;
+    size_t size;
+    void *map;
+    QEMUCPUState **state;
+    size_t state_nr;
+    int has_kernel_gs_base;
+} QEMU_Elf;
+
+int QEMU_Elf_init(QEMU_Elf *qe, const char *filename);
+void QEMU_Elf_exit(QEMU_Elf *qe);
+
+Elf64_Phdr *elf64_getphdr(void *map);
+Elf64_Half elf_getphdrnum(void *map);
+
+#endif /* QEMU_ELF_H */
From 5ee547bb78a8cb868d68e4b9122b0df5650ed8a7 Mon Sep 17 00:00:00 2001
From: Viktor Prutyanov
Date: Tue, 18 Sep 2018 12:54:22 +0300
Subject: [PATCH 62/80] MAINTAINERS: add myself as elf2dmp maintainer

Add myself as contrib/elf2dmp maintainer and mark elf2dmp as maintained.

Signed-off-by: Viktor Prutyanov
Message-Id: <20180918095422.4468-1-viktor.prutyanov@phystech.edu>
Signed-off-by: Paolo Bonzini
---
 MAINTAINERS | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 15503f41d8..3275cc6bbe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1903,6 +1903,11 @@ S: Maintained
 F: include/qemu/iova-tree.h
 F: util/iova-tree.c

+elf2dmp
+M: Viktor Prutyanov
+S: Maintained
+F: contrib/elf2dmp/
+
 Usermode Emulation
 ------------------
 Overall
From 4c3e250627bfa2ed272660d5376ac0f6ff458556 Mon Sep 17 00:00:00 2001
From: Yongji Xie
Date: Wed, 11 Jul 2018 21:22:44 +0800
Subject: [PATCH 63/80] kvmclock: run KVM_KVMCLOCK_CTRL ioctl in vcpu thread

According to the KVM API documentation, we should only run vcpu ioctls
from the same thread that was used to create the vcpu. This patch makes
the KVM_KVMCLOCK_CTRL ioctl consistent with the documentation. No
functional change.
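
In code, the change simply defers the ioctl to the vcpu's own thread via
run_on_cpu(); a minimal sketch of the idiom, mirroring the diff that
follows:

    /* Executed on the vcpu's own thread, as the KVM API requires. */
    static void do_kvmclock_ctrl(CPUState *cpu, run_on_cpu_data data)
    {
        int ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);

        if (ret && ret != -EINVAL) {
            fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
        }
    }

    /* Caller side: schedule the work instead of issuing the ioctl here. */
    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvmclock_ctrl, RUN_ON_CPU_NULL);
    }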
Signed-off-by: Yongji Xie
Signed-off-by: Chai Wen
Message-Id: <1531315364-2551-1-git-send-email-xieyongji@baidu.com>
Signed-off-by: Paolo Bonzini
---
 hw/i386/kvm/clock.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index 0bf1c60a06..25ea783bec 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -147,6 +147,15 @@ static void kvm_update_clock(KVMClockState *s)
     s->clock_is_reliable = kvm_has_adjust_clock_stable();
 }

+static void do_kvmclock_ctrl(CPUState *cpu, run_on_cpu_data data)
+{
+    int ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);
+
+    if (ret && ret != -EINVAL) {
+        fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
+    }
+}
+
 static void kvmclock_vm_state_change(void *opaque, int running,
                                      RunState state)
 {
@@ -183,13 +192,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
             return;
         }
         CPU_FOREACH(cpu) {
-            ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);
-            if (ret) {
-                if (ret != -EINVAL) {
-                    fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
-                }
-                return;
-            }
+            run_on_cpu(cpu, do_kvmclock_ctrl, RUN_ON_CPU_NULL);
         }
     } else {
From 51f43d5792e3d7a52c0be380e9b855fc4a05a816 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Mon, 17 Sep 2018 16:31:38 +0800
Subject: [PATCH 64/80] scsi-block: Deprecate rotation_rate

This option was added together with the scsi-disk one, but it is never
honoured, because we don't emulate the VPD page for scsi-block. We could
intercept and inject the user-specified value as we do for the maximum
transfer length, but it's probably not helpful, since the intent of
070f80095ad was about random entropy aspects, not performance. If an
emulated rotation rate is desired, scsi-hd is more suitable.

Signed-off-by: Fam Zheng
Message-Id: <20180917083138.3948-1-famz@redhat.com>
Signed-off-by: Paolo Bonzini
---
 hw/scsi/scsi-disk.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 5ae7baa082..c43163cef4 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -2610,6 +2610,12 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
         return;
     }

+    if (s->rotation_rate) {
+        error_report_once("rotation_rate is specified for scsi-block but is "
+                          "not implemented. This option is deprecated and will "
+                          "be removed in a future version");
+    }
+
     /* check we are using a driver managing SG_IO (version 3 and after) */
     rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
     if (rc < 0) {
From c921370b22cba70ada74cef43f7d36c011648ec6 Mon Sep 17 00:00:00 2001
From: Mark Cave-Ayland
Date: Mon, 17 Sep 2018 06:32:29 +0100
Subject: [PATCH 65/80] lsi53c895a: convert to trace-events

Signed-off-by: Mark Cave-Ayland
Message-Id: <20180917053229.4853-1-mark.cave-ayland@ilande.co.uk>
Signed-off-by: Paolo Bonzini
---
 hw/scsi/lsi53c895a.c | 214 +++++++++++++++++++++----------------------
 hw/scsi/trace-events |  62 +++++++++++++
 2 files changed, 165 insertions(+), 111 deletions(-)

diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index 996b40650d..d1e6534311 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -20,20 +20,7 @@
 #include "hw/scsi/scsi.h"
 #include "sysemu/dma.h"
 #include "qemu/log.h"
-
-//#define DEBUG_LSI
-//#define DEBUG_LSI_REG
-
-#ifdef DEBUG_LSI
-#define DPRINTF(fmt, ...) \
-do { printf("lsi_scsi: " fmt , ## __VA_ARGS__); } while (0)
-#define BADF(fmt, ...) \
-do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__); exit(1);} while (0)
-#else
-#define DPRINTF(fmt, ...)
do {} while(0) -#define BADF(fmt, ...) \ -do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__);} while (0) -#endif +#include "trace.h" static const char *names[] = { "SCNTL0", "SCNTL1", "SCNTL2", "SCNTL3", "SCID", "SXFER", "SDID", "GPREG", @@ -313,7 +300,7 @@ static inline int lsi_irq_on_rsl(LSIState *s) static void lsi_soft_reset(LSIState *s) { - DPRINTF("Reset\n"); + trace_lsi_reset(); s->carry = 0; s->msg_action = 0; @@ -484,15 +471,13 @@ static void lsi_update_irq(LSIState *s) level = 1; if (level != last_level) { - DPRINTF("Update IRQ level %d dstat %02x sist %02x%02x\n", - level, s->dstat, s->sist1, s->sist0); + trace_lsi_update_irq(level, s->dstat, s->sist1, s->sist0); last_level = level; } lsi_set_irq(s, level); if (!level && lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON)) { - DPRINTF("Handled IRQs & disconnected, looking for pending " - "processes\n"); + trace_lsi_update_irq_disconnected(); QTAILQ_FOREACH(p, &s->queue, next) { if (p->pending) { lsi_reselect(s, p); @@ -508,8 +493,7 @@ static void lsi_script_scsi_interrupt(LSIState *s, int stat0, int stat1) uint32_t mask0; uint32_t mask1; - DPRINTF("SCSI Interrupt 0x%02x%02x prev 0x%02x%02x\n", - stat1, stat0, s->sist1, s->sist0); + trace_lsi_script_scsi_interrupt(stat1, stat0, s->sist1, s->sist0); s->sist0 |= stat0; s->sist1 |= stat1; /* Stop processor on fatal or unmasked interrupt. As a special hack @@ -527,7 +511,7 @@ static void lsi_script_scsi_interrupt(LSIState *s, int stat0, int stat1) /* Stop SCRIPTS execution and raise a DMA interrupt. */ static void lsi_script_dma_interrupt(LSIState *s, int stat) { - DPRINTF("DMA Interrupt 0x%x prev 0x%x\n", stat, s->dstat); + trace_lsi_script_dma_interrupt(stat, s->dstat); s->dstat |= stat; lsi_update_irq(s); lsi_stop_script(s); @@ -547,9 +531,9 @@ static void lsi_bad_phase(LSIState *s, int out, int new_phase) } else { s->dsp = (s->scntl2 & LSI_SCNTL2_WSR ? s->pmjad2 : s->pmjad1); } - DPRINTF("Data phase mismatch jump to %08x\n", s->dsp); + trace_lsi_bad_phase_jump(s->dsp); } else { - DPRINTF("Phase mismatch interrupt\n"); + trace_lsi_bad_phase_interrupt(); lsi_script_scsi_interrupt(s, LSI_SIST0_MA, 0); lsi_stop_script(s); } @@ -576,7 +560,7 @@ static void lsi_disconnect(LSIState *s) static void lsi_bad_selection(LSIState *s, uint32_t id) { - DPRINTF("Selected absent target %d\n", id); + trace_lsi_bad_selection(id); lsi_script_scsi_interrupt(s, 0, LSI_SIST1_STO); lsi_disconnect(s); } @@ -591,7 +575,7 @@ static void lsi_do_dma(LSIState *s, int out) assert(s->current); if (!s->current->dma_len) { /* Wait until data is available. 
*/ - DPRINTF("DMA no data available\n"); + trace_lsi_do_dma_unavailable(); return; } @@ -611,7 +595,7 @@ static void lsi_do_dma(LSIState *s, int out) else if (s->sbms) addr |= ((uint64_t)s->sbms << 32); - DPRINTF("DMA addr=0x" DMA_ADDR_FMT " len=%d\n", addr, count); + trace_lsi_do_dma(addr, count); s->csbc += count; s->dnad += count; s->dbc -= count; @@ -640,7 +624,7 @@ static void lsi_queue_command(LSIState *s) { lsi_request *p = s->current; - DPRINTF("Queueing tag=0x%x\n", p->tag); + trace_lsi_queue_command(p->tag); assert(s->current != NULL); assert(s->current->dma_len == 0); QTAILQ_INSERT_TAIL(&s->queue, s->current, next); @@ -654,9 +638,9 @@ static void lsi_queue_command(LSIState *s) static void lsi_add_msg_byte(LSIState *s, uint8_t data) { if (s->msg_len >= LSI_MAX_MSGIN_LEN) { - BADF("MSG IN data too long\n"); + trace_lsi_add_msg_byte_error(); } else { - DPRINTF("MSG IN 0x%02x\n", data); + trace_lsi_add_msg_byte(data); s->msg[s->msg_len++] = data; } } @@ -676,7 +660,7 @@ static void lsi_reselect(LSIState *s, lsi_request *p) if (!(s->dcntl & LSI_DCNTL_COM)) { s->sfbr = 1 << (id & 0x7); } - DPRINTF("Reselected target %d\n", id); + trace_lsi_reselect(id); s->scntl1 |= LSI_SCNTL1_CON; lsi_set_phase(s, PHASE_MI); s->msg_action = p->out ? 2 : 3; @@ -732,7 +716,7 @@ static int lsi_queue_req(LSIState *s, SCSIRequest *req, uint32_t len) lsi_request *p = req->hba_private; if (p->pending) { - BADF("Multiple IO pending for request %p\n", p); + trace_lsi_queue_req_error(p); } p->pending = len; /* Reselect if waiting for it, or if reselection triggers an IRQ @@ -747,7 +731,7 @@ static int lsi_queue_req(LSIState *s, SCSIRequest *req, uint32_t len) lsi_reselect(s, p); return 0; } else { - DPRINTF("Queueing IO tag=0x%x\n", p->tag); + trace_lsi_queue_req(p->tag); p->pending = len; return 1; } @@ -760,7 +744,7 @@ static void lsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid int out; out = (s->sstat1 & PHASE_MASK) == PHASE_DO; - DPRINTF("Command complete status=%d\n", (int)status); + trace_lsi_command_complete(status); s->status = status; s->command_complete = 2; if (s->waiting && s->dbc != 0) { @@ -795,7 +779,7 @@ static void lsi_transfer_data(SCSIRequest *req, uint32_t len) out = (s->sstat1 & PHASE_MASK) == PHASE_DO; /* host adapter (re)connected */ - DPRINTF("Data ready tag=0x%x len=%d\n", req->tag, len); + trace_lsi_transfer_data(req->tag, len); s->current->dma_len = len; s->command_complete = 1; if (s->waiting) { @@ -814,7 +798,7 @@ static void lsi_do_command(LSIState *s) uint32_t id; int n; - DPRINTF("Send command len=%d\n", s->dbc); + trace_lsi_do_command(s->dbc); if (s->dbc > 16) s->dbc = 16; pci_dma_read(PCI_DEVICE(s), s->dnad, buf, s->dbc); @@ -862,9 +846,10 @@ static void lsi_do_command(LSIState *s) static void lsi_do_status(LSIState *s) { uint8_t status; - DPRINTF("Get status len=%d status=%d\n", s->dbc, s->status); - if (s->dbc != 1) - BADF("Bad Status move\n"); + trace_lsi_do_status(s->dbc, s->status); + if (s->dbc != 1) { + trace_lsi_do_status_error(); + } s->dbc = 1; status = s->status; s->sfbr = status; @@ -877,7 +862,7 @@ static void lsi_do_status(LSIState *s) static void lsi_do_msgin(LSIState *s) { int len; - DPRINTF("Message in len=%d/%d\n", s->dbc, s->msg_len); + trace_lsi_do_msgin(s->dbc, s->msg_len); s->sfbr = s->msg[0]; len = s->msg_len; if (len > s->dbc) @@ -942,36 +927,36 @@ static void lsi_do_msgout(LSIState *s) current_req = lsi_find_by_tag(s, current_tag); } - DPRINTF("MSG out len=%d\n", s->dbc); + trace_lsi_do_msgout(s->dbc); while (s->dbc) { msg = 
lsi_get_msgbyte(s); s->sfbr = msg; switch (msg) { case 0x04: - DPRINTF("MSG: Disconnect\n"); + trace_lsi_do_msgout_disconnect(); lsi_disconnect(s); break; case 0x08: - DPRINTF("MSG: No Operation\n"); + trace_lsi_do_msgout_noop(); lsi_set_phase(s, PHASE_CMD); break; case 0x01: len = lsi_get_msgbyte(s); msg = lsi_get_msgbyte(s); (void)len; /* avoid a warning about unused variable*/ - DPRINTF("Extended message 0x%x (len %d)\n", msg, len); + trace_lsi_do_msgout_extended(msg, len); switch (msg) { case 1: - DPRINTF("SDTR (ignored)\n"); + trace_lsi_do_msgout_ignored("SDTR"); lsi_skip_msgbytes(s, 2); break; case 3: - DPRINTF("WDTR (ignored)\n"); + trace_lsi_do_msgout_ignored("WDTR"); lsi_skip_msgbytes(s, 1); break; case 4: - DPRINTF("PPR (ignored)\n"); + trace_lsi_do_msgout_ignored("PPR"); lsi_skip_msgbytes(s, 5); break; default: @@ -980,19 +965,20 @@ static void lsi_do_msgout(LSIState *s) break; case 0x20: /* SIMPLE queue */ s->select_tag |= lsi_get_msgbyte(s) | LSI_TAG_VALID; - DPRINTF("SIMPLE queue tag=0x%x\n", s->select_tag & 0xff); + trace_lsi_do_msgout_simplequeue(s->select_tag & 0xff); break; case 0x21: /* HEAD of queue */ - BADF("HEAD queue not implemented\n"); + qemu_log_mask(LOG_UNIMP, "lsi_scsi: HEAD queue not implemented\n"); s->select_tag |= lsi_get_msgbyte(s) | LSI_TAG_VALID; break; case 0x22: /* ORDERED queue */ - BADF("ORDERED queue not implemented\n"); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: ORDERED queue not implemented\n"); s->select_tag |= lsi_get_msgbyte(s) | LSI_TAG_VALID; break; case 0x0d: /* The ABORT TAG message clears the current I/O process only. */ - DPRINTF("MSG: ABORT TAG tag=0x%x\n", current_tag); + trace_lsi_do_msgout_abort(current_tag); if (current_req) { scsi_req_cancel(current_req->req); } @@ -1004,17 +990,17 @@ static void lsi_do_msgout(LSIState *s) /* The ABORT message clears all I/O processes for the selecting initiator on the specified logical unit of the target. */ if (msg == 0x06) { - DPRINTF("MSG: ABORT tag=0x%x\n", current_tag); + trace_lsi_do_msgout_abort(current_tag); } /* The CLEAR QUEUE message clears all I/O processes for all initiators on the specified logical unit of the target. */ if (msg == 0x0e) { - DPRINTF("MSG: CLEAR QUEUE tag=0x%x\n", current_tag); + trace_lsi_do_msgout_clearqueue(current_tag); } /* The BUS DEVICE RESET message clears all I/O processes for all initiators on all logical units of the target. */ if (msg == 0x0c) { - DPRINTF("MSG: BUS DEVICE RESET tag=0x%x\n", current_tag); + trace_lsi_do_msgout_busdevicereset(current_tag); } /* clear the current I/O process */ @@ -1042,14 +1028,14 @@ static void lsi_do_msgout(LSIState *s) goto bad; } s->current_lun = msg & 7; - DPRINTF("Select LUN %d\n", s->current_lun); + trace_lsi_do_msgout_select(s->current_lun); lsi_set_phase(s, PHASE_CMD); break; } } return; bad: - BADF("Unimplemented message 0x%02x\n", msg); + qemu_log_mask(LOG_UNIMP, "Unimplemented message 0x%02x\n", msg); lsi_set_phase(s, PHASE_MI); lsi_add_msg_byte(s, 7); /* MESSAGE REJECT */ s->msg_action = 0; @@ -1061,7 +1047,7 @@ static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count) int n; uint8_t buf[LSI_BUF_SIZE]; - DPRINTF("memcpy dest 0x%08x src 0x%08x count %d\n", dest, src, count); + trace_lsi_memcpy(dest, src, count); while (count) { n = (count > LSI_BUF_SIZE) ? 
LSI_BUF_SIZE : count; lsi_mem_read(s, src, buf, n); @@ -1076,7 +1062,7 @@ static void lsi_wait_reselect(LSIState *s) { lsi_request *p; - DPRINTF("Wait Reselect\n"); + trace_lsi_wait_reselect(); QTAILQ_FOREACH(p, &s->queue, next) { if (p->pending) { @@ -1109,14 +1095,14 @@ again: } addr = read_dword(s, s->dsp + 4); addr_high = 0; - DPRINTF("SCRIPTS dsp=%08x opcode %08x arg %08x\n", s->dsp, insn, addr); + trace_lsi_execute_script(s->dsp, insn, addr); s->dsps = addr; s->dcmd = insn >> 24; s->dsp += 8; switch (insn >> 30) { case 0: /* Block move. */ if (s->sist1 & LSI_SIST1_STO) { - DPRINTF("Delayed select timeout\n"); + trace_lsi_execute_script_blockmove_delayed(); lsi_stop_script(s); break; } @@ -1171,8 +1157,9 @@ again: addr_high = s->dbms; break; default: - BADF("Illegal selector specified (0x%x > 0x15)" - " for 64-bit DMA block move", selector); + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: Illegal selector specified (0x%x > 0x15) " + "for 64-bit DMA block move", selector); break; } } @@ -1184,8 +1171,8 @@ again: s->ia = s->dsp - 12; } if ((s->sstat1 & PHASE_MASK) != ((insn >> 24) & 7)) { - DPRINTF("Wrong phase got %d expected %d\n", - s->sstat1 & PHASE_MASK, (insn >> 24) & 7); + trace_lsi_execute_script_blockmove_badphase(s->sstat1 & PHASE_MASK, + (insn >> 24) & 7); lsi_script_scsi_interrupt(s, LSI_SIST0_MA, 0); break; } @@ -1217,8 +1204,8 @@ again: lsi_do_msgin(s); break; default: - BADF("Unimplemented phase %d\n", s->sstat1 & PHASE_MASK); - exit(1); + qemu_log_mask(LOG_UNIMP, "lsi_scsi: Unimplemented phase %d\n", + s->sstat1 & PHASE_MASK); } s->dfifo = s->dbc & 0xff; s->ctest5 = (s->ctest5 & 0xfc) | ((s->dbc >> 8) & 3); @@ -1246,7 +1233,7 @@ again: case 0: /* Select */ s->sdid = id; if (s->scntl1 & LSI_SCNTL1_CON) { - DPRINTF("Already reselected, jumping to alternative address\n"); + trace_lsi_execute_script_io_alreadyreselected(); s->dsp = s->dnad; break; } @@ -1256,8 +1243,8 @@ again: lsi_bad_selection(s, id); break; } - DPRINTF("Selected target %d%s\n", - id, insn & (1 << 3) ? " ATN" : ""); + trace_lsi_execute_script_io_selected(id, + insn & (1 << 3) ? " ATN" : ""); /* ??? Linux drivers compain when this is set. Maybe it only applies in low-level mode (unimplemented). lsi_script_scsi_interrupt(s, LSI_SIST0_CMP, 0); */ @@ -1269,7 +1256,7 @@ again: lsi_set_phase(s, PHASE_MO); break; case 1: /* Disconnect */ - DPRINTF("Wait Disconnect\n"); + trace_lsi_execute_script_io_disconnect(); s->scntl1 &= ~LSI_SCNTL1_CON; break; case 2: /* Wait Reselect */ @@ -1278,7 +1265,7 @@ again: } break; case 3: /* Set */ - DPRINTF("Set%s%s%s%s\n", + trace_lsi_execute_script_io_set( insn & (1 << 3) ? " ATN" : "", insn & (1 << 6) ? " ACK" : "", insn & (1 << 9) ? " TM" : "", @@ -1288,14 +1275,14 @@ again: lsi_set_phase(s, PHASE_MO); } if (insn & (1 << 9)) { - BADF("Target mode not implemented\n"); - exit(1); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Target mode not implemented\n"); } if (insn & (1 << 10)) s->carry = 1; break; case 4: /* Clear */ - DPRINTF("Clear%s%s%s%s\n", + trace_lsi_execute_script_io_clear( insn & (1 << 3) ? " ATN" : "", insn & (1 << 6) ? " ACK" : "", insn & (1 << 9) ? 
" TM" : "", @@ -1313,18 +1300,17 @@ again: uint8_t data8; int reg; int operator; -#ifdef DEBUG_LSI + static const char *opcode_names[3] = {"Write", "Read", "Read-Modify-Write"}; static const char *operator_names[8] = {"MOV", "SHL", "OR", "XOR", "AND", "SHR", "ADD", "ADC"}; -#endif reg = ((insn >> 16) & 0x7f) | (insn & 0x80); data8 = (insn >> 8) & 0xff; opcode = (insn >> 27) & 7; operator = (insn >> 24) & 7; - DPRINTF("%s reg 0x%x %s data8=0x%02x sfbr=0x%02x%s\n", + trace_lsi_execute_script_io_opcode( opcode_names[opcode - 5], reg, operator_names[operator], data8, s->sfbr, (insn & (1 << 23)) ? " SFBR" : ""); @@ -1404,21 +1390,21 @@ again: int jmp; if ((insn & 0x002e0000) == 0) { - DPRINTF("NOP\n"); + trace_lsi_execute_script_tc_nop(); break; } if (s->sist1 & LSI_SIST1_STO) { - DPRINTF("Delayed select timeout\n"); + trace_lsi_execute_script_tc_delayedselect_timeout(); lsi_stop_script(s); break; } cond = jmp = (insn & (1 << 19)) != 0; if (cond == jmp && (insn & (1 << 21))) { - DPRINTF("Compare carry %d\n", s->carry == jmp); + trace_lsi_execute_script_tc_compc(s->carry == jmp); cond = s->carry != 0; } if (cond == jmp && (insn & (1 << 17))) { - DPRINTF("Compare phase %d %c= %d\n", + trace_lsi_execute_script_tc_compp( (s->sstat1 & PHASE_MASK), jmp ? '=' : '!', ((insn >> 24) & 7)); @@ -1428,7 +1414,7 @@ again: uint8_t mask; mask = (~insn >> 8) & 0xff; - DPRINTF("Compare data 0x%x & 0x%x %c= 0x%x\n", + trace_lsi_execute_script_tc_compd( s->sfbr, mask, jmp ? '=' : '!', insn & mask); cond = (s->sfbr & mask) == (insn & mask); } @@ -1439,21 +1425,21 @@ again: } switch ((insn >> 27) & 7) { case 0: /* Jump */ - DPRINTF("Jump to 0x%08x\n", addr); + trace_lsi_execute_script_tc_jump(addr); s->adder = addr; s->dsp = addr; break; case 1: /* Call */ - DPRINTF("Call 0x%08x\n", addr); + trace_lsi_execute_script_tc_call(addr); s->temp = s->dsp; s->dsp = addr; break; case 2: /* Return */ - DPRINTF("Return to 0x%08x\n", s->temp); + trace_lsi_execute_script_tc_return(s->temp); s->dsp = s->temp; break; case 3: /* Interrupt */ - DPRINTF("Interrupt 0x%08x\n", s->dsps); + trace_lsi_execute_script_tc_interrupt(s->dsps); if ((insn & (1 << 20)) != 0) { s->istat0 |= LSI_ISTAT0_INTF; lsi_update_irq(s); @@ -1462,12 +1448,12 @@ again: } break; default: - DPRINTF("Illegal transfer control\n"); + trace_lsi_execute_script_tc_illegal(); lsi_script_dma_interrupt(s, LSI_DSTAT_IID); break; } } else { - DPRINTF("Control condition failed\n"); + trace_lsi_execute_script_tc_cc_failed(); } } break; @@ -1495,13 +1481,12 @@ again: reg = (insn >> 16) & 0xff; if (insn & (1 << 24)) { pci_dma_read(pci_dev, addr, data, n); - DPRINTF("Load reg 0x%x size %d addr 0x%08x = %08x\n", reg, n, - addr, *(int *)data); + trace_lsi_execute_script_mm_load(reg, n, addr, *(int *)data); for (i = 0; i < n; i++) { lsi_reg_writeb(s, reg + i, data[i]); } } else { - DPRINTF("Store reg 0x%x size %d addr 0x%08x\n", reg, n, addr); + trace_lsi_execute_script_mm_store(reg, n, addr); for (i = 0; i < n; i++) { data[i] = lsi_reg_readb(s, reg + i); } @@ -1515,8 +1500,10 @@ again: assume this is the case and force an unexpected device disconnect. This is apparently sufficient to beat the drivers into submission. */ - if (!(s->sien0 & LSI_SIST0_UDC)) - fprintf(stderr, "inf. loop with UDC masked\n"); + if (!(s->sien0 & LSI_SIST0_UDC)) { + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: inf. 
loop with UDC masked"); + } lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0); lsi_disconnect(s); } else if (s->istat1 & LSI_ISTAT1_SRUN && !s->waiting) { @@ -1526,7 +1513,7 @@ again: goto again; } } - DPRINTF("SCRIPTS execution stopped\n"); + trace_lsi_execute_script_stop(); } static uint8_t lsi_reg_readb(LSIState *s, int offset) @@ -1761,10 +1748,8 @@ static uint8_t lsi_reg_readb(LSIState *s, int offset) #undef CASE_GET_REG24 #undef CASE_GET_REG32 -#ifdef DEBUG_LSI_REG - DPRINTF("Read reg %s %x = %02x\n", - offset < ARRAY_SIZE(names) ? names[offset] : "???", offset, ret); -#endif + trace_lsi_reg_read(offset < ARRAY_SIZE(names) ? names[offset] : "???", + offset, ret); return ret; } @@ -1782,21 +1767,22 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) case addr + 2: s->name &= 0xff00ffff; s->name |= val << 16; break; \ case addr + 3: s->name &= 0x00ffffff; s->name |= val << 24; break; -#ifdef DEBUG_LSI_REG - DPRINTF("Write reg %s %x = %02x\n", - offset < ARRAY_SIZE(names) ? names[offset] : "???", offset, val); -#endif + trace_lsi_reg_write(offset < ARRAY_SIZE(names) ? names[offset] : "???", + offset, val); + switch (offset) { case 0x00: /* SCNTL0 */ s->scntl0 = val; if (val & LSI_SCNTL0_START) { - BADF("Start sequence not implemented\n"); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Start sequence not implemented\n"); } break; case 0x01: /* SCNTL1 */ s->scntl1 = val & ~LSI_SCNTL1_SST; if (val & LSI_SCNTL1_IARB) { - BADF("Immediate Arbritration not implemented\n"); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Immediate Arbritration not implemented\n"); } if (val & LSI_SCNTL1_RST) { if (!(s->sstat0 & LSI_SSTAT0_RST)) { @@ -1823,7 +1809,8 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) break; case 0x06: /* SDID */ if ((s->ssid & 0x80) && (val & 0xf) != (s->ssid & 0xf)) { - BADF("Destination ID does not match SSID\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: Destination ID does not match SSID\n"); } s->sdid = val & 0xf; break; @@ -1851,7 +1838,7 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) lsi_update_irq(s); } if (s->waiting == 1 && val & LSI_ISTAT0_SIGP) { - DPRINTF("Woken by SIGP\n"); + trace_lsi_awoken(); s->waiting = 0; s->dsp = s->dnad; lsi_execute_script(s); @@ -1878,13 +1865,15 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) CASE_SET_REG32(temp, 0x1c) case 0x21: /* CTEST4 */ if (val & 7) { - BADF("Unimplemented CTEST4-FBL 0x%x\n", val); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Unimplemented CTEST4-FBL 0x%x\n", val); } s->ctest4 = val; break; case 0x22: /* CTEST5 */ if (val & (LSI_CTEST5_ADCK | LSI_CTEST5_BBCK)) { - BADF("CTEST5 DMA increment not implemented\n"); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: CTEST5 DMA increment not implemented\n"); } s->ctest5 = val; break; @@ -1941,7 +1930,8 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) break; case 0x49: /* STIME1 */ if (val & 0xf) { - DPRINTF("General purpose timer not implemented\n"); + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: General purpose timer not implemented\n"); /* ??? Raising the interrupt immediately seems to be sufficient to keep the FreeBSD driver happy. 
*/
             lsi_script_scsi_interrupt(s, 0, LSI_SIST1_GEN);
@@ -1958,13 +1948,15 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
         break;
     case 0x4e: /* STEST2 */
         if (val & 1) {
-            BADF("Low level mode not implemented\n");
+            qemu_log_mask(LOG_UNIMP,
+                          "lsi_scsi: Low level mode not implemented\n");
         }
         s->stest2 = val;
         break;
     case 0x4f: /* STEST3 */
         if (val & 0x41) {
-            BADF("SCSI FIFO test mode not implemented\n");
+            qemu_log_mask(LOG_UNIMP,
+                          "lsi_scsi: SCSI FIFO test mode not implemented\n");
         }
         s->stest3 = val;
         break;
diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
index 6e299d0338..0fb6a99616 100644
--- a/hw/scsi/trace-events
+++ b/hw/scsi/trace-events
@@ -229,3 +229,65 @@ spapr_vscsi_process_login(void) "Got login, sending response !"
 spapr_vscsi_queue_cmd_no_drive(uint64_t lun) "Command for lun 0x%08" PRIx64 " with no drive"
 spapr_vscsi_queue_cmd(uint32_t qtag, unsigned cdb, const char *cmd, int lun, int ret) "Queued command tag 0x%"PRIx32" CMD 0x%x=%s LUN %d ret: %d"
 spapr_vscsi_do_crq(unsigned c0, unsigned c1) "crq: %02x %02x ..."
+
+# hw/scsi/lsi53c895a.c
+lsi_reset(void) "Reset"
+lsi_update_irq(int level, uint8_t dstat, uint8_t sist1, uint8_t sist0) "Update IRQ level %d dstat 0x%02x sist 0x%02x0x%02x"
+lsi_update_irq_disconnected(void) "Handled IRQs & disconnected, looking for pending processes"
+lsi_script_scsi_interrupt(uint8_t stat1, uint8_t stat0, uint8_t sist1, uint8_t sist0) "SCSI Interrupt 0x%02x0x%02x prev 0x%02x0x%02x"
+lsi_script_dma_interrupt(uint8_t stat, uint8_t dstat) "DMA Interrupt 0x%x prev 0x%x"
+lsi_bad_phase_jump(uint32_t dsp) "Data phase mismatch jump to 0x%"PRIX32
+lsi_bad_phase_interrupt(void) "Phase mismatch interrupt"
+lsi_bad_selection(uint32_t id) "Selected absent target %"PRIu32
+lsi_do_dma_unavailable(void) "DMA no data available"
+lsi_do_dma(uint64_t addr, int len) "DMA addr=0x%"PRIx64" len=%d"
+lsi_queue_command(uint32_t tag) "Queueing tag=0x%"PRId32
+lsi_add_msg_byte_error(void) "MSG IN data too long"
+lsi_add_msg_byte(uint8_t data) "MSG IN 0x%02x"
+lsi_reselect(int id) "Reselected target %d"
+lsi_queue_req_error(void *p) "Multiple IO pending for request %p"
+lsi_queue_req(uint32_t tag) "Queueing IO tag=0x%"PRIx32
+lsi_command_complete(uint32_t status) "Command complete status=%"PRId32
+lsi_transfer_data(uint32_t tag, uint32_t len) "Data ready tag=0x%"PRIx32" len=%"PRId32
+lsi_do_command(uint32_t dbc) "Send command len=%"PRId32
+lsi_do_status(uint32_t dbc, uint8_t status) "Get status len=%"PRId32" status=%d"
+lsi_do_status_error(void) "Bad Status move"
+lsi_do_msgin(uint32_t dbc, int len) "Message in len=%"PRId32" %d"
+lsi_do_msgout(uint32_t dbc) "MSG out len=%"PRId32
+lsi_do_msgout_disconnect(void) "MSG: Disconnect"
+lsi_do_msgout_noop(void) "MSG: No Operation"
+lsi_do_msgout_extended(uint8_t msg, uint8_t len) "Extended message 0x%x (len %d)"
+lsi_do_msgout_ignored(const char *msg) "%s (ignored)"
+lsi_do_msgout_simplequeue(uint8_t select_tag) "SIMPLE queue tag=0x%x"
+lsi_do_msgout_abort(uint32_t tag) "MSG: ABORT TAG tag=0x%"PRId32
+lsi_do_msgout_clearqueue(uint32_t tag) "MSG: CLEAR QUEUE tag=0x%"PRIx32
+lsi_do_msgout_busdevicereset(uint32_t tag) "MSG: BUS DEVICE RESET tag=0x%"PRIx32
+lsi_do_msgout_select(int id) "Select LUN %d"
+lsi_memcpy(uint32_t dest, uint32_t src, int count) "memcpy dest 0x%"PRIx32" src 0x%"PRIx32" count %d"
+lsi_wait_reselect(void) "Wait Reselect"
+lsi_execute_script(uint32_t dsp, uint32_t insn, uint32_t addr) "SCRIPTS dsp=0x%"PRIx32" opcode 0x%"PRIx32" arg 0x%"PRIx32
+lsi_execute_script_blockmove_delayed(void) "Delayed select timeout"
+lsi_execute_script_blockmove_badphase(uint8_t phase, uint8_t expected) "Wrong phase got %d expected %d"
+lsi_execute_script_io_alreadyreselected(void) "Already reselected, jumping to alternative address"
+lsi_execute_script_io_selected(uint8_t id, const char *atn) "Selected target %d%s"
+lsi_execute_script_io_disconnect(void) "Wait Disconnect"
+lsi_execute_script_io_set(const char *atn, const char *ack, const char *tm, const char *cc) "Set%s%s%s%s"
+lsi_execute_script_io_clear(const char *atn, const char *ack, const char *tm, const char *cc) "Clear%s%s%s%s"
+lsi_execute_script_io_opcode(const char *opcode, int reg, const char *opname, uint8_t data8, uint32_t sfbr, const char *ssfbr) "%s reg 0x%x %s data8=0x%02x sfbr=0x%02x%s"
+lsi_execute_script_tc_nop(void) "NOP"
+lsi_execute_script_tc_delayedselect_timeout(void) "Delayed select timeout"
+lsi_execute_script_tc_compc(int result) "Compare carry %d"
+lsi_execute_script_tc_compp(uint8_t phase, int op, uint8_t insn_phase) "Compare phase %d %c= %d"
+lsi_execute_script_tc_compd(uint32_t sfbr, uint8_t mask, int op, int result) "Compare data 0x%"PRIx32" & 0x%x %c= 0x%x"
+lsi_execute_script_tc_jump(uint32_t addr) "Jump to 0x%"PRIx32
+lsi_execute_script_tc_call(uint32_t addr) "Call 0x%"PRIx32
+lsi_execute_script_tc_return(uint32_t addr) "Return to 0x%"PRIx32
+lsi_execute_script_tc_interrupt(uint32_t addr) "Interrupt 0x%"PRIx32
+lsi_execute_script_tc_illegal(void) "Illegal transfer control"
+lsi_execute_script_tc_cc_failed(void) "Control condition failed"
+lsi_execute_script_mm_load(int reg, int n, uint32_t addr, int data) "Load reg 0x%x size %d addr 0x%"PRIx32" = 0x%08x"
+lsi_execute_script_mm_store(int reg, int n, uint32_t addr) "Store reg 0x%x size %d addr 0x%"PRIx32
+lsi_execute_script_stop(void) "SCRIPTS execution stopped"
+lsi_awoken(void) "Woken by SIGP"
+lsi_reg_read(const char *name, int offset, uint8_t ret) "Read reg %s 0x%x = 0x%02x"
+lsi_reg_write(const char *name, int offset, uint8_t val) "Write reg %s 0x%x = 0x%02x"

From dcf6760a64f6cacc5b7a77b0e530f3fffef5e189 Mon Sep 17 00:00:00 2001
From: Thomas Huth
Date: Mon, 17 Sep 2018 19:08:54 +0200
Subject: [PATCH 66/80] accel/tcg: Remove dead code

The global cpu_single_env variable was removed more than 5 years ago,
so apparently nobody has used this dead debug code in that time frame.
Thus let's remove it completely now.
Signed-off-by: Thomas Huth
Message-Id: <1537204134-15905-1-git-send-email-thuth@redhat.com>
Reviewed-by: Peter Maydell
Signed-off-by: Paolo Bonzini
---
 accel/tcg/translate-all.c | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 9ffbbc2fbd..ad5c758246 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2009,15 +2009,6 @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
 {
     PageDesc *p;
 
-#if 0
-    if (1) {
-        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
-                 cpu_single_env->mem_io_vaddr, len,
-                 cpu_single_env->eip,
-                 cpu_single_env->eip +
-                 (intptr_t)cpu_single_env->segs[R_CS].base);
-    }
-#endif
     assert_memory_lock();
 
     p = page_find(start >> TARGET_PAGE_BITS);

From 422ca1432f7b44f2a9f3ad94a65d36927da021fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?=
Date: Wed, 12 Sep 2018 16:53:03 +0400
Subject: [PATCH 67/80] qom/object: add some interface asserts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

An interface can't have any instance size or callback, or itself
implement other interfaces (this is unsupported).

Signed-off-by: Marc-André Lureau
Message-Id: <20180912125303.29158-1-marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini
---
 qom/object.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/qom/object.c b/qom/object.c
index 75d1d48944..9222b23172 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -286,7 +286,14 @@ static void type_initialize(TypeImpl *ti)
     if (ti->instance_size == 0) {
         ti->abstract = true;
     }
-
+    if (type_is_ancestor(ti, type_interface)) {
+        assert(ti->instance_size == 0);
+        assert(ti->abstract);
+        assert(!ti->instance_init);
+        assert(!ti->instance_post_init);
+        assert(!ti->instance_finalize);
+        assert(!ti->num_interfaces);
+    }
     ti->class = g_malloc0(ti->class_size);
 
     parent = type_get_parent(ti);

From 442c3b4594bb5a537c5f83dd9c65a0919723eca8 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 18 Sep 2018 11:28:19 +0200
Subject: [PATCH 68/80] hvf: drop unused variable

Signed-off-by: Paolo Bonzini
---
 target/i386/hvf/hvf.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 5db167df98..9f52bc413a 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -72,7 +72,6 @@
 #include "sysemu/sysemu.h"
 #include "target/i386/cpu.h"
 
-pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
 HVFState *hvf_state;
 int hvf_disabled = 1;

From 41d54dc09f1f327dedc79d5ba0b1b437ab7b0e94 Mon Sep 17 00:00:00 2001
From: Pavel Dovgalyuk
Date: Thu, 20 Sep 2018 10:17:03 +0300
Subject: [PATCH 69/80] target/i386: fix translation for icount mode

This patch fixes the checking of boundary-crossing instructions. In
icount mode, only the first instruction of a block may cross the page
boundary, to keep the translation deterministic. These conditions
already existed, but compared the wrong variable.
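The corrected condition is plain mask arithmetic: an instruction
starting at pc can cross into the next page exactly when its first and
last possible bytes land in different pages. A self-contained sketch
(the macro values below are illustrative: 4 KiB pages and the 16-byte
x86 instruction-length limit):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_MASK     (~(uint64_t)0xfff) /* assumed: 4 KiB pages */
    #define TARGET_MAX_INSN_SIZE 16                 /* x86 maximum length */

    /* Nonzero if an insn starting at pc might extend into the next page. */
    static int may_cross_page(uint64_t pc)
    {
        return (pc & TARGET_PAGE_MASK)
               != ((pc + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK);
    }

    int main(void)
    {
        printf("%d\n", may_cross_page(0x1000)); /* 0: 0x1000..0x100f, one page */
        printf("%d\n", may_cross_page(0x1ff8)); /* 1: 0x1ff8..0x2007, two pages */
        return 0;
    }

As the diff below shows, the test has to be applied to pc_next, the
address at which the next instruction starts, rather than to
dc->base.pc_next.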
Signed-off-by: Pavel Dovgalyuk
Message-Id: <20180920071702.22477.43980.stgit@pasha-VirtualBox>
Signed-off-by: Paolo Bonzini
---
 target/i386/translate.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/i386/translate.c b/target/i386/translate.c
index 8fcd88e326..83c1ebe491 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -8538,10 +8538,10 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
            chance to happen */
         dc->base.is_jmp = DISAS_TOO_MANY;
     } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
-               && ((dc->base.pc_next & TARGET_PAGE_MASK)
-                   != ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
+               && ((pc_next & TARGET_PAGE_MASK)
+                   != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
                        & TARGET_PAGE_MASK)
-                   || (dc->base.pc_next & ~TARGET_PAGE_MASK) == 0)) {
+                   || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
         /* Do not cross the boundary of the pages in icount mode,
            it can cause an exception. Do it only when boundary is
            crossed by the first instruction in the block.

From d5dbde4645fe56a1bcd678f85fa26c5548bcf552 Mon Sep 17 00:00:00 2001
From: Hikaru Nishida
Date: Mon, 24 Sep 2018 21:32:05 +0900
Subject: [PATCH 70/80] hostmem-file: make available memory-backend-file on
 POSIX-based hosts

Before this change, the memory-backend-file object was valid only on
Linux hosts, because hostmem-file.c was compiled only on Linux hosts.
However, other POSIX-based hosts (such as macOS) can support the
memory-backend-file object in the same way. This patch compiles
hostmem-file.c and the related functions on all POSIX-based hosts, so
that memory-backend-file becomes available on them as well.

Signed-off-by: Hikaru Nishida
Message-Id: <20180924123205.29651-1-hikarupsp@gmail.com>
Signed-off-by: Paolo Bonzini
---
 backends/Makefile.objs  | 2 +-
 backends/hostmem-file.c | 2 +-
 exec.c                  | 4 ++--
 include/exec/memory.h   | 2 +-
 memory.c                | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/backends/Makefile.objs b/backends/Makefile.objs
index ad7c0325ed..717fcbdae4 100644
--- a/backends/Makefile.objs
+++ b/backends/Makefile.objs
@@ -4,7 +4,7 @@ common-obj-$(CONFIG_POSIX) += rng-random.o
 common-obj-$(CONFIG_TPM) += tpm.o
 
 common-obj-y += hostmem.o hostmem-ram.o
-common-obj-$(CONFIG_LINUX) += hostmem-file.o
+common-obj-$(CONFIG_POSIX) += hostmem-file.o
 
 common-obj-y += cryptodev.o
 common-obj-y += cryptodev-builtin.o
diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c
index 2476dcb435..e64074954f 100644
--- a/backends/hostmem-file.c
+++ b/backends/hostmem-file.c
@@ -51,7 +51,7 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
         error_setg(errp, "mem-path property not set");
         return;
     }
-#ifndef CONFIG_LINUX
+#ifndef CONFIG_POSIX
     error_setg(errp, "-mem-path not supported on this host");
 #else
     if (!host_memory_backend_mr_inited(backend)) {
diff --git a/exec.c b/exec.c
index 6826c8337d..d0821e69aa 100644
--- a/exec.c
+++ b/exec.c
@@ -1734,7 +1734,7 @@ long qemu_getrampagesize(void)
 }
 #endif
 
-#ifdef __linux__
+#ifdef CONFIG_POSIX
 static int64_t get_file_size(int fd)
 {
     int64_t size = lseek(fd, 0, SEEK_END);
@@ -2230,7 +2230,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
     }
 }
 
-#ifdef __linux__
+#ifdef CONFIG_POSIX
 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                  uint32_t ram_flags, int fd,
                                  Error **errp)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index eb4f2fb249..e78a9a4f48 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -633,7 +633,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                        uint64_t length,
                                        void *host),
                                        Error **errp);
-#ifdef __linux__
+#ifdef CONFIG_POSIX
 
 /**
  * memory_region_init_ram_from_file: Initialize RAM memory region with a
diff --git a/memory.c b/memory.c
index aceadb2bf8..f797d82dd1 100644
--- a/memory.c
+++ b/memory.c
@@ -1557,7 +1557,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
     }
 }
 
-#ifdef __linux__
+#ifdef CONFIG_POSIX
 void memory_region_init_ram_from_file(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,

From b255df7e6ee2b6da7a54b62b8e6c145054fec0db Mon Sep 17 00:00:00 2001
From: Pavel Dovgalyuk
Date: Wed, 12 Sep 2018 11:19:50 +0300
Subject: [PATCH 71/80] replay: replay BH for IDE trim operation

This patch makes the IDE trim BH deterministic, because it affects the
device state. Therefore its invocation should be replayed instead of
being run at an arbitrary moment.

Signed-off-by: Pavel Dovgalyuk
Reviewed-by: Paolo Bonzini
Message-Id: <20180912081950.3228.68987.stgit@pasha-VirtualBox>
Acked-by: John Snow
Signed-off-by: Paolo Bonzini
---
 hw/ide/core.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hw/ide/core.c b/hw/ide/core.c
index 2c62efc536..04e22e751d 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -35,6 +35,7 @@
 #include "sysemu/block-backend.h"
 #include "qapi/error.h"
 #include "qemu/cutils.h"
+#include "sysemu/replay.h"
 #include "hw/ide/internal.h"
 #include "trace.h"
 
@@ -479,7 +480,7 @@ static void ide_issue_trim_cb(void *opaque, int ret)
 done:
     iocb->aiocb = NULL;
     if (iocb->bh) {
-        qemu_bh_schedule(iocb->bh);
+        replay_bh_schedule_event(iocb->bh);
     }
 }

From d41ca5afe3bc513ecf10b3ba5aa59523e3cd54aa Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 26 Sep 2018 23:17:42 +0200
Subject: [PATCH 72/80] virtio: do not take address of packed members

The address of a packed member is an ordinary pointer that is assumed
to be properly aligned, so dereferencing it may result in unaligned
accesses. Avoid this by reading the packed value before passing it to
another function.

Cc: Jason Wang
Cc: Peter Maydell
Signed-off-by: Paolo Bonzini
---
 hw/char/virtio-serial-bus.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index d2dd8ab502..04e3ebe352 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -667,9 +667,9 @@ static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f)
 
     /* The config space (ignored on the far end in current versions) */
     get_config(vdev, (uint8_t *)&config);
-    qemu_put_be16s(f, &config.cols);
-    qemu_put_be16s(f, &config.rows);
-    qemu_put_be32s(f, &config.max_nr_ports);
+    qemu_put_be16(f, config.cols);
+    qemu_put_be16(f, config.rows);
+    qemu_put_be32(f, config.max_nr_ports);
 
     /* The ports map */
     max_nr_ports = s->serial.max_virtserial_ports;

From 36960b4d66d2dd59174f230766d1f4eaffec60a3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Thu, 27 Sep 2018 02:24:14 +0200
Subject: [PATCH 73/80] memory: Use MAKE_64BIT_MASK()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Suggested-by: Paolo Bonzini
Signed-off-by: Philippe Mathieu-Daudé
Message-Id: <20180927002416.1781-2-f4bug@amsat.org>
Signed-off-by: Paolo Bonzini
---
 memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/memory.c b/memory.c
index f797d82dd1..b3051d99f5 100644
--- a/memory.c
+++ b/memory.c
@@ -582,7 +582,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
     /* FIXME: support unaligned access?
*/ access_size = MAX(MIN(size, access_size_max), access_size_min); - access_mask = -1ULL >> (64 - access_size * 8); + access_mask = MAKE_64BIT_MASK(0, access_size * 8); if (memory_region_big_endian(mr)) { for (i = 0; i < size; i += access_size) { r |= access_fn(mr, addr + i, value, access_size, From 3c754a9383ac70f316f1b98aec203182de250c42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Thu, 27 Sep 2018 02:24:15 +0200 Subject: [PATCH 74/80] memory: Refactor common shifting code from accessors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20180927002416.1781-3-f4bug@amsat.org> Signed-off-by: Paolo Bonzini --- memory.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/memory.c b/memory.c index b3051d99f5..a4d3fa7823 100644 --- a/memory.c +++ b/memory.c @@ -374,6 +374,21 @@ static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) } } +static inline void memory_region_shift_read_access(uint64_t *value, + unsigned shift, + uint64_t mask, + uint64_t tmp) +{ + *value |= (tmp & mask) << shift; +} + +static inline uint64_t memory_region_shift_write_access(uint64_t *value, + unsigned shift, + uint64_t mask) +{ + return (*value >> shift) & mask; +} + static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset) { MemoryRegion *root; @@ -418,7 +433,7 @@ static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr, hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); } - *value |= (tmp & mask) << shift; + memory_region_shift_read_access(value, shift, mask, tmp); return MEMTX_OK; } @@ -444,7 +459,7 @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr, hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); } - *value |= (tmp & mask) << shift; + memory_region_shift_read_access(value, shift, mask, tmp); return MEMTX_OK; } @@ -471,7 +486,7 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); } - *value |= (tmp & mask) << shift; + memory_region_shift_read_access(value, shift, mask, tmp); return r; } @@ -483,9 +498,8 @@ static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr, uint64_t mask, MemTxAttrs attrs) { - uint64_t tmp; + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); - tmp = (*value >> shift) & mask; if (mr->subpage) { trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); } else if (mr == &io_mem_notdirty) { @@ -509,9 +523,8 @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr, uint64_t mask, MemTxAttrs attrs) { - uint64_t tmp; + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); - tmp = (*value >> shift) & mask; if (mr->subpage) { trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); } else if (mr == &io_mem_notdirty) { @@ -535,9 +548,8 @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, uint64_t mask, MemTxAttrs attrs) { - uint64_t tmp; + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); - tmp = (*value >> shift) & mask; if (mr->subpage) { trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, 
size);
     } else if (mr == &io_mem_notdirty) {

From 98f52cdbb5cb44c0ec69a133fc34505ea7c26520 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Thu, 27 Sep 2018 02:24:16 +0200
Subject: [PATCH 75/80] memory: Fix access_with_adjusted_size(small size) on
 big-endian memory regions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Memory regions configured as DEVICE_BIG_ENDIAN (or DEVICE_NATIVE_ENDIAN
on a big-endian guest) behave incorrectly when the memory access 'size'
is smaller than the implementation 'access_size'.

In the following code segment from access_with_adjusted_size():

    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }

(size - access_size - i) * 8 is the number of bits by which the current
value is arithmetically shifted. Currently we can only 'left' shift a
read() access, and 'right' shift a write(). When the access 'size' is
smaller than the implementation's access size, we get a negative number
of bits to shift. For the read() case, a negative 'left' shift is a
'right' shift :) However, since the 'shift' type is unsigned, there is
currently no way to right-shift.

Fix this by changing the access_fn() prototype to handle signed shift
values, and by modifying the memory_region_shift_read|write_access()
helpers to arithmetically shift in the opposite direction when the
'shift' value is negative.

Signed-off-by: Philippe Mathieu-Daudé
Message-Id: <20180927002416.1781-4-f4bug@amsat.org>
Signed-off-by: Paolo Bonzini
---
 memory.c | 34 +++++++++++++++++++++-----------
 1 file changed, 23 insertions(+), 11 deletions(-)

diff --git a/memory.c b/memory.c
index a4d3fa7823..b96aec7ba3 100644
--- a/memory.c
+++ b/memory.c
@@ -375,18 +375,30 @@ static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
 }
 
 static inline void memory_region_shift_read_access(uint64_t *value,
-                                                   unsigned shift,
+                                                   signed shift,
                                                    uint64_t mask,
                                                    uint64_t tmp)
 {
-    *value |= (tmp & mask) << shift;
+    if (shift >= 0) {
+        *value |= (tmp & mask) << shift;
+    } else {
+        *value |= (tmp & mask) >> -shift;
+    }
 }
 
 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
-                                                        unsigned shift,
+                                                        signed shift,
                                                         uint64_t mask)
 {
-    return (*value >> shift) & mask;
+    uint64_t tmp;
+
+    if (shift >= 0) {
+        tmp = (*value >> shift) & mask;
+    } else {
+        tmp = (*value << -shift) & mask;
+    }
+
+    return tmp;
 }
 
 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
@@ -415,7 +427,7 @@ static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
-                                                       unsigned shift,
+                                                       signed shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
 {
@@ -441,7 +453,7 @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
-                                               unsigned shift,
+                                               signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
 {
@@ -467,7 +479,7 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
-                                                          unsigned shift,
+                                                          signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
 {
@@ -494,7 +506,7 @@ static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                         hwaddr addr,
                                                         uint64_t *value,
                                                         unsigned size,
-                                                        unsigned shift,
+                                                        signed shift,
                                                         uint64_t mask,
                                                         MemTxAttrs attrs)
 {
@@ -519,7 +531,7 @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
-                                                unsigned shift,
+                                                signed
shift, uint64_t mask, MemTxAttrs attrs) { @@ -544,7 +556,7 @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, - unsigned shift, + signed shift, uint64_t mask, MemTxAttrs attrs) { @@ -574,7 +586,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, hwaddr addr, uint64_t *value, unsigned size, - unsigned shift, + signed shift, uint64_t mask, MemTxAttrs attrs), MemoryRegion *mr, From 62a0db942dec6ebfec19aac2b604737d3c9a2d75 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 24 Aug 2018 18:04:20 +0100 Subject: [PATCH 76/80] memory: Remove old_mmio accessors Now that all the users of old_mmio MemoryRegion accessors have been converted, we can remove the core code support. Signed-off-by: Peter Maydell Message-Id: <20180824170422.5783-2-peter.maydell@linaro.org> Based-on: <20180802174042.29234-1-peter.maydell@linaro.org> Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- docs/devel/memory.txt | 2 -- include/exec/memory.h | 5 ---- memory.c | 63 ++----------------------------------------- 3 files changed, 2 insertions(+), 68 deletions(-) diff --git a/docs/devel/memory.txt b/docs/devel/memory.txt index c1dee1252c..4fff0d5603 100644 --- a/docs/devel/memory.txt +++ b/docs/devel/memory.txt @@ -342,5 +342,3 @@ various constraints can be supplied to control how these callbacks are called: - .impl.unaligned specifies that the *implementation* supports unaligned accesses; if false, unaligned accesses will be emulated by two aligned accesses. - - .old_mmio eases the porting of code that was formerly using - cpu_register_io_memory(). It should not be used in new code. diff --git a/include/exec/memory.h b/include/exec/memory.h index e78a9a4f48..3a427aacf1 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -201,11 +201,6 @@ struct MemoryRegionOps { */ bool unaligned; } impl; - - /* If .read and .write are not present, old_mmio may be used for - * backwards compatibility with old mmio registration - */ - const MemoryRegionMmio old_mmio; }; enum IOMMUMemoryRegionAttr { diff --git a/memory.c b/memory.c index b96aec7ba3..d852f1143d 100644 --- a/memory.c +++ b/memory.c @@ -423,32 +423,6 @@ static int get_cpu_index(void) return -1; } -static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs) -{ - uint64_t tmp; - - tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); - if (mr->subpage) { - trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); - } else if (mr == &io_mem_notdirty) { - /* Accesses to code which has previously been translated into a TB show - * up in the MMIO path, as accesses to the io_mem_notdirty - * MemoryRegion. 
*/ - trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size); - } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { - hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); - trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); - } - memory_region_shift_read_access(value, shift, mask, tmp); - return MEMTX_OK; -} - static MemTxResult memory_region_read_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, @@ -502,31 +476,6 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, return r; } -static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs) -{ - uint64_t tmp = memory_region_shift_write_access(value, shift, mask); - - if (mr->subpage) { - trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); - } else if (mr == &io_mem_notdirty) { - /* Accesses to code which has previously been translated into a TB show - * up in the MMIO path, as accesses to the io_mem_notdirty - * MemoryRegion. */ - trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size); - } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) { - hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); - trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); - } - mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); - return MEMTX_OK; -} - static MemTxResult memory_region_write_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, @@ -1418,16 +1367,12 @@ static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, mr->ops->impl.max_access_size, memory_region_read_accessor, mr, attrs); - } else if (mr->ops->read_with_attrs) { + } else { return access_with_adjusted_size(addr, pval, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_read_with_attrs_accessor, mr, attrs); - } else { - return access_with_adjusted_size(addr, pval, size, 1, 4, - memory_region_oldmmio_read_accessor, - mr, attrs); } } @@ -1499,17 +1444,13 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, mr->ops->impl.max_access_size, memory_region_write_accessor, mr, attrs); - } else if (mr->ops->write_with_attrs) { + } else { return access_with_adjusted_size(addr, &data, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_write_with_attrs_accessor, mr, attrs); - } else { - return access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_write_accessor, - mr, attrs); } } From 695e2fc2d64f272cec9e15b4e2a921ae303921b3 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 24 Aug 2018 18:04:21 +0100 Subject: [PATCH 77/80] hw/nvram/fw_cfg: Use memberwise copy of MemoryRegionOps struct We've now removed the 'old_mmio' member from MemoryRegionOps, so we can perform the copy as a simple struct copy rather than having to do it via a memberwise copy. 
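The memberwise copy was needed in the first place because of a C
language rule: a structure object that contains a const-qualified
member is not a modifiable lvalue, so whole-struct assignment to it is
rejected. A minimal illustration of the rule (generic types, not the
QEMU ones):

    struct ops {
        void (*write)(void);
        const int old_mmio;      /* stand-in for the old const member */
    };

    void copy_ops(struct ops *dst, const struct ops *src)
    {
        /* *dst = *src;   -- error: the destination object has a const
         *                   member, so it cannot be assigned as a whole */
        dst->write = src->write; /* memberwise copy is still allowed */
    }

With the const member gone, s->wide_data_ops = *data_ops is an ordinary
struct assignment, which is exactly what the patch below switches to.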
Signed-off-by: Peter Maydell Message-Id: <20180824170422.5783-3-peter.maydell@linaro.org> Based-on: <20180802174042.29234-1-peter.maydell@linaro.org> Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- hw/nvram/fw_cfg.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 6de7809f1a..946f765f7f 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -1115,12 +1115,7 @@ static void fw_cfg_mem_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->ctl_iomem); if (s->data_width > data_ops->valid.max_access_size) { - /* memberwise copy because the "old_mmio" member is const */ - s->wide_data_ops.read = data_ops->read; - s->wide_data_ops.write = data_ops->write; - s->wide_data_ops.endianness = data_ops->endianness; - s->wide_data_ops.valid = data_ops->valid; - s->wide_data_ops.impl = data_ops->impl; + s->wide_data_ops = *data_ops; s->wide_data_ops.valid.max_access_size = s->data_width; s->wide_data_ops.impl.max_access_size = s->data_width; From 687ac05d71bbb3172e0546248e40483ef43a4813 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 24 Aug 2018 18:04:22 +0100 Subject: [PATCH 78/80] docs/devel/memory.txt: Document _with_attrs accessors When we added the _with_attrs accessors we forgot to mention them in the documentation. Signed-off-by: Peter Maydell Message-Id: <20180824170422.5783-4-peter.maydell@linaro.org> Based-on: <20180802174042.29234-1-peter.maydell@linaro.org> Signed-off-by: Paolo Bonzini --- docs/devel/memory.txt | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/devel/memory.txt b/docs/devel/memory.txt index 4fff0d5603..42577e1d86 100644 --- a/docs/devel/memory.txt +++ b/docs/devel/memory.txt @@ -326,8 +326,15 @@ visible as the pci-hole alias clips it to a 0.5GB range. MMIO Operations --------------- -MMIO regions are provided with ->read() and ->write() callbacks; in addition -various constraints can be supplied to control how these callbacks are called: +MMIO regions are provided with ->read() and ->write() callbacks, +which are sufficient for most devices. Some devices change behaviour +based on the attributes used for the memory transaction, or need +to be able to respond that the access should provoke a bus error +rather than completing successfully; those devices can use the +->read_with_attrs() and ->write_with_attrs() callbacks instead. + +In addition various constraints can be supplied to control how these +callbacks are called: - .valid.min_access_size, .valid.max_access_size define the access sizes (in bytes) which the device accepts; accesses outside this range will From 1926ab273b374372e5108a9e360b9e9366d9acf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20Benn=C3=A9e?= Date: Thu, 27 Sep 2018 18:17:24 +0100 Subject: [PATCH 79/80] cpus: fix TCG kick timer leak MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an alternative fix to Marc-André's original patch. 
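The leak being fixed: start_tcg_kick_timer() allocated a fresh timer
with timer_new_ns() whenever the pointer was NULL, while
stop_tcg_kick_timer() NULLed the pointer after timer_del() without ever
calling timer_free(), so every stop/start cycle abandoned one
QEMUTimer. In outline (a sketch of the two shapes only, with t standing
for tcg_kick_vcpu_timer):

    /* Before: each stop drops the timer; the next start allocates anew. */
    static void stop_before(void)
    {
        if (t) {
            timer_del(t);
            t = NULL;       /* pointer lost, timer_free() never called */
        }
    }

    /* After: allocate once and keep the object alive; just disarm it. */
    static void stop_after(void)
    {
        if (t && timer_pending(t)) {
            timer_del(t);   /* timer stays allocated for reuse */
        }
    }

The start path correspondingly only re-arms the existing timer with
timer_mod() when it is not already pending, as the diff below shows.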
Reported-by: Marc-André Lureau Suggested-by: Paolo Bonzini Signed-off-by: Alex Bennée Message-Id: <20180927171724.30128-1-alex.bennee@linaro.org> Signed-off-by: Paolo Bonzini --- cpus.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cpus.c b/cpus.c index 68f08f5b2d..361678e459 100644 --- a/cpus.c +++ b/cpus.c @@ -983,6 +983,8 @@ static void start_tcg_kick_timer(void) if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) { tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kick_tcg_thread, NULL); + } + if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) { timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); } } @@ -990,9 +992,8 @@ static void start_tcg_kick_timer(void) static void stop_tcg_kick_timer(void) { assert(!mttcg_enabled); - if (tcg_kick_vcpu_timer) { + if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) { timer_del(tcg_kick_vcpu_timer); - tcg_kick_vcpu_timer = NULL; } } From 97866508669c4a75f531bfa94f8267900fcbb5dc Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Thu, 27 Sep 2018 14:48:52 +0100 Subject: [PATCH 80/80] hw/scsi/mptendian: Avoid taking address of fields in packed structs Taking the address of a field in a packed struct is a bad idea, because it might not be actually aligned enough for that pointer type (and thus cause a crash on dereference on some host architectures). Newer versions of clang warn about this. Avoid the bug by not using the "modify in place" byte swapping functions. This patch was produced with the following simple spatch script: @@ expression E; @@ -le16_to_cpus(&E); +E = le16_to_cpu(E); @@ expression E; @@ -le32_to_cpus(&E); +E = le32_to_cpu(E); @@ expression E; @@ -le64_to_cpus(&E); +E = le64_to_cpu(E); @@ expression E; @@ -cpu_to_le16s(&E); +E = cpu_to_le16(E); @@ expression E; @@ -cpu_to_le32s(&E); +E = cpu_to_le32(E); @@ expression E; @@ -cpu_to_le64s(&E); +E = cpu_to_le64(E); followed by some minor tidying of overlong lines and bad indent. 
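On a typical call site the transformation looks like this (a sketch
with a hypothetical packed struct; le16_to_cpus()/le16_to_cpu() are the
byte-swap helpers named in the script, stubbed here so the fragment
stands alone):

    #include <stdint.h>

    /* Stub for illustration; assumes a little-endian host. */
    static inline uint16_t le16_to_cpu(uint16_t v) { return v; }

    struct reply {
        uint8_t  pad;
        uint16_t IOCStatus;    /* offset 1: misaligned for uint16_t */
    } __attribute__((packed));

    void fix_endianness(struct reply *r)
    {
        /* Before: le16_to_cpus(&r->IOCStatus); -- the member's address
         * escapes as a plain uint16_t *, which callees assume aligned. */

        /* After: only the value crosses the call boundary, and the
         * unaligned access stays inside the compiler-generated member
         * load/store, which knows the struct is packed. */
        r->IOCStatus = le16_to_cpu(r->IOCStatus);
    }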
Signed-off-by: Peter Maydell Message-Id: <20180927134852.21490-1-peter.maydell@linaro.org> Reviewed-by: Fam Zheng Signed-off-by: Paolo Bonzini --- hw/scsi/mptendian.c | 163 ++++++++++++++++++++++---------------------- 1 file changed, 83 insertions(+), 80 deletions(-) diff --git a/hw/scsi/mptendian.c b/hw/scsi/mptendian.c index 8ae39a76f4..79f99734d2 100644 --- a/hw/scsi/mptendian.c +++ b/hw/scsi/mptendian.c @@ -35,152 +35,155 @@ static void mptsas_fix_sgentry_endianness(MPISGEntry *sge) { - le32_to_cpus(&sge->FlagsLength); + sge->FlagsLength = le32_to_cpu(sge->FlagsLength); if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { - le64_to_cpus(&sge->u.Address64); + sge->u.Address64 = le64_to_cpu(sge->u.Address64); } else { - le32_to_cpus(&sge->u.Address32); + sge->u.Address32 = le32_to_cpu(sge->u.Address32); } } static void mptsas_fix_sgentry_endianness_reply(MPISGEntry *sge) { if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { - cpu_to_le64s(&sge->u.Address64); + sge->u.Address64 = cpu_to_le64(sge->u.Address64); } else { - cpu_to_le32s(&sge->u.Address32); + sge->u.Address32 = cpu_to_le32(sge->u.Address32); } - cpu_to_le32s(&sge->FlagsLength); + sge->FlagsLength = cpu_to_le32(sge->FlagsLength); } void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req) { - le32_to_cpus(&req->MsgContext); - le32_to_cpus(&req->Control); - le32_to_cpus(&req->DataLength); - le32_to_cpus(&req->SenseBufferLowAddr); + req->MsgContext = le32_to_cpu(req->MsgContext); + req->Control = le32_to_cpu(req->Control); + req->DataLength = le32_to_cpu(req->DataLength); + req->SenseBufferLowAddr = le32_to_cpu(req->SenseBufferLowAddr); } void mptsas_fix_scsi_io_reply_endianness(MPIMsgSCSIIOReply *reply) { - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); - cpu_to_le32s(&reply->TransferCount); - cpu_to_le32s(&reply->SenseCount); - cpu_to_le32s(&reply->ResponseInfo); - cpu_to_le16s(&reply->TaskTag); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->TransferCount = cpu_to_le32(reply->TransferCount); + reply->SenseCount = cpu_to_le32(reply->SenseCount); + reply->ResponseInfo = cpu_to_le32(reply->ResponseInfo); + reply->TaskTag = cpu_to_le16(reply->TaskTag); } void mptsas_fix_scsi_task_mgmt_endianness(MPIMsgSCSITaskMgmt *req) { - le32_to_cpus(&req->MsgContext); - le32_to_cpus(&req->TaskMsgContext); + req->MsgContext = le32_to_cpu(req->MsgContext); + req->TaskMsgContext = le32_to_cpu(req->TaskMsgContext); } void mptsas_fix_scsi_task_mgmt_reply_endianness(MPIMsgSCSITaskMgmtReply *reply) { - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); - cpu_to_le32s(&reply->TerminationCount); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->TerminationCount = cpu_to_le32(reply->TerminationCount); } void mptsas_fix_ioc_init_endianness(MPIMsgIOCInit *req) { - le32_to_cpus(&req->MsgContext); - le16_to_cpus(&req->ReplyFrameSize); - le32_to_cpus(&req->HostMfaHighAddr); - le32_to_cpus(&req->SenseBufferHighAddr); - le32_to_cpus(&req->ReplyFifoHostSignalingAddr); + req->MsgContext = le32_to_cpu(req->MsgContext); + req->ReplyFrameSize = le16_to_cpu(req->ReplyFrameSize); + req->HostMfaHighAddr = le32_to_cpu(req->HostMfaHighAddr); + req->SenseBufferHighAddr = le32_to_cpu(req->SenseBufferHighAddr); + 
req->ReplyFifoHostSignalingAddr = + le32_to_cpu(req->ReplyFifoHostSignalingAddr); mptsas_fix_sgentry_endianness(&req->HostPageBufferSGE); - le16_to_cpus(&req->MsgVersion); - le16_to_cpus(&req->HeaderVersion); + req->MsgVersion = le16_to_cpu(req->MsgVersion); + req->HeaderVersion = le16_to_cpu(req->HeaderVersion); } void mptsas_fix_ioc_init_reply_endianness(MPIMsgIOCInitReply *reply) { - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); } void mptsas_fix_ioc_facts_endianness(MPIMsgIOCFacts *req) { - le32_to_cpus(&req->MsgContext); + req->MsgContext = le32_to_cpu(req->MsgContext); } void mptsas_fix_ioc_facts_reply_endianness(MPIMsgIOCFactsReply *reply) { - cpu_to_le16s(&reply->MsgVersion); - cpu_to_le16s(&reply->HeaderVersion); - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCExceptions); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); - cpu_to_le16s(&reply->ReplyQueueDepth); - cpu_to_le16s(&reply->RequestFrameSize); - cpu_to_le16s(&reply->ProductID); - cpu_to_le32s(&reply->CurrentHostMfaHighAddr); - cpu_to_le16s(&reply->GlobalCredits); - cpu_to_le32s(&reply->CurrentSenseBufferHighAddr); - cpu_to_le16s(&reply->CurReplyFrameSize); - cpu_to_le32s(&reply->FWImageSize); - cpu_to_le32s(&reply->IOCCapabilities); - cpu_to_le16s(&reply->HighPriorityQueueDepth); + reply->MsgVersion = cpu_to_le16(reply->MsgVersion); + reply->HeaderVersion = cpu_to_le16(reply->HeaderVersion); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCExceptions = cpu_to_le16(reply->IOCExceptions); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->ReplyQueueDepth = cpu_to_le16(reply->ReplyQueueDepth); + reply->RequestFrameSize = cpu_to_le16(reply->RequestFrameSize); + reply->ProductID = cpu_to_le16(reply->ProductID); + reply->CurrentHostMfaHighAddr = cpu_to_le32(reply->CurrentHostMfaHighAddr); + reply->GlobalCredits = cpu_to_le16(reply->GlobalCredits); + reply->CurrentSenseBufferHighAddr = + cpu_to_le32(reply->CurrentSenseBufferHighAddr); + reply->CurReplyFrameSize = cpu_to_le16(reply->CurReplyFrameSize); + reply->FWImageSize = cpu_to_le32(reply->FWImageSize); + reply->IOCCapabilities = cpu_to_le32(reply->IOCCapabilities); + reply->HighPriorityQueueDepth = cpu_to_le16(reply->HighPriorityQueueDepth); mptsas_fix_sgentry_endianness_reply(&reply->HostPageBufferSGE); - cpu_to_le32s(&reply->ReplyFifoHostSignalingAddr); + reply->ReplyFifoHostSignalingAddr = + cpu_to_le32(reply->ReplyFifoHostSignalingAddr); } void mptsas_fix_config_endianness(MPIMsgConfig *req) { - le16_to_cpus(&req->ExtPageLength); - le32_to_cpus(&req->MsgContext); - le32_to_cpus(&req->PageAddress); + req->ExtPageLength = le16_to_cpu(req->ExtPageLength); + req->MsgContext = le32_to_cpu(req->MsgContext); + req->PageAddress = le32_to_cpu(req->PageAddress); mptsas_fix_sgentry_endianness(&req->PageBufferSGE); } void mptsas_fix_config_reply_endianness(MPIMsgConfigReply *reply) { - cpu_to_le16s(&reply->ExtPageLength); - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); + reply->ExtPageLength = cpu_to_le16(reply->ExtPageLength); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); } void 
mptsas_fix_port_facts_endianness(MPIMsgPortFacts *req) { - le32_to_cpus(&req->MsgContext); + req->MsgContext = le32_to_cpu(req->MsgContext); } void mptsas_fix_port_facts_reply_endianness(MPIMsgPortFactsReply *reply) { - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); - cpu_to_le16s(&reply->MaxDevices); - cpu_to_le16s(&reply->PortSCSIID); - cpu_to_le16s(&reply->ProtocolFlags); - cpu_to_le16s(&reply->MaxPostedCmdBuffers); - cpu_to_le16s(&reply->MaxPersistentIDs); - cpu_to_le16s(&reply->MaxLanBuckets); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->MaxDevices = cpu_to_le16(reply->MaxDevices); + reply->PortSCSIID = cpu_to_le16(reply->PortSCSIID); + reply->ProtocolFlags = cpu_to_le16(reply->ProtocolFlags); + reply->MaxPostedCmdBuffers = cpu_to_le16(reply->MaxPostedCmdBuffers); + reply->MaxPersistentIDs = cpu_to_le16(reply->MaxPersistentIDs); + reply->MaxLanBuckets = cpu_to_le16(reply->MaxLanBuckets); } void mptsas_fix_port_enable_endianness(MPIMsgPortEnable *req) { - le32_to_cpus(&req->MsgContext); + req->MsgContext = le32_to_cpu(req->MsgContext); } void mptsas_fix_port_enable_reply_endianness(MPIMsgPortEnableReply *reply) { - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); } void mptsas_fix_event_notification_endianness(MPIMsgEventNotify *req) { - le32_to_cpus(&req->MsgContext); + req->MsgContext = le32_to_cpu(req->MsgContext); } void mptsas_fix_event_notification_reply_endianness(MPIMsgEventNotifyReply *reply) @@ -188,16 +191,16 @@ void mptsas_fix_event_notification_reply_endianness(MPIMsgEventNotifyReply *repl int length = reply->EventDataLength; int i; - cpu_to_le16s(&reply->EventDataLength); - cpu_to_le32s(&reply->MsgContext); - cpu_to_le16s(&reply->IOCStatus); - cpu_to_le32s(&reply->IOCLogInfo); - cpu_to_le32s(&reply->Event); - cpu_to_le32s(&reply->EventContext); + reply->EventDataLength = cpu_to_le16(reply->EventDataLength); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->Event = cpu_to_le32(reply->Event); + reply->EventContext = cpu_to_le32(reply->EventContext); /* Really depends on the event kind. This will do for now. */ for (i = 0; i < length; i++) { - cpu_to_le32s(&reply->Data[i]); + reply->Data[i] = cpu_to_le32(reply->Data[i]); } }
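To close, a self-contained demonstration of the hazard that patches 72
and 80 remove (GCC/clang packed attribute; on strict-alignment hosts
the dereference through the escaped pointer can trap, and recent clang
warns about the escape with -Waddress-of-packed-member):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct msg {
        uint8_t  kind;
        uint32_t value;       /* offset 1: misaligned for uint32_t */
    } __attribute__((packed));

    int main(void)
    {
        struct msg m = { 1, 0x12345678 };

        /* Hazardous: the address escapes as a plain uint32_t *, which
         * the compiler and any callee assume is 4-byte aligned. */
        uint32_t *p = (uint32_t *)(void *)&m.value;
        printf("0x%" PRIx32 "\n", *p);

        /* Safe: access through the member expression; the compiler
         * knows the struct is packed and emits byte-wise accesses
         * where the target architecture needs them. */
        m.value += 1;
        printf("0x%" PRIx32 "\n", m.value);
        return 0;
    }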