* TCG 8-byte atomic accesses bugfix (Andrew)
* Report disk rotation rate (Daniel)
* Report invalid scsi-disk block size configuration (Mark)
* KVM and memory API MemoryListener fixes (David, Maxime, Peter Xu)
* x86 CPU hotplug crash fix (Igor)
* Load/store API documentation (Peter Maydell)
* Small fixes by myself and Thomas
* qdev DEVICE_DELETED deferral (Michael)

-----BEGIN PGP SIGNATURE-----

iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAlnnJUgUHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroMifwf/dTZwtGqvAV4+jezCiZ3MTknz39dM
HOGnD3m2xy04QT5LHiwDmaLFXy1y/AUVQm79JMPN4dKoFvtruREoWUq8EU0FCsLZ
PkdCbJuXKGiBYMRXkQQxeT8lAyaBQwZdc+O9mYuOrSGZOQscA7SxgClYmzVdVzcy
ZNTqkuaw1NDIAapdfGv94WLza4Nb8XX8bFwohgkf4mLDXifhjYHQTbBTfB0NqPxH
Rk3HU+wgYUCJRYXpvktESgzRo5sm1aozCRq3f0Y6RV12ylgF6GG4CyN7YcKRn8eh
NZbyehHiF5YU2kuvO9SmAB+FqM2+aMtq8uuNuI1Nxgd222MOVaChyWc3jg==
=gmUj
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* TCG 8-byte atomic accesses bugfix (Andrew)
* Report disk rotation rate (Daniel)
* Report invalid scsi-disk block size configuration (Mark)
* KVM and memory API MemoryListener fixes (David, Maxime, Peter Xu)
* x86 CPU hotplug crash fix (Igor)
* Load/store API documentation (Peter Maydell)
* Small fixes by myself and Thomas
* qdev DEVICE_DELETED deferral (Michael)

# gpg: Signature made Wed 18 Oct 2017 10:56:24 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (29 commits)
  scsi: reject configurations with logical block size > physical block size
  qdev: defer DEVICE_DEL event until instance_finalize()
  Revert "qdev: Free QemuOpts when the QOM path goes away"
  qdev: store DeviceState's canonical path to use when unparenting
  qemu-pr-helper: use new libmultipath API
  watch_mem_write: implement 8-byte accesses
  notdirty_mem_write: implement 8-byte accesses
  memory: reuse section_from_flat_range()
  kvm: simplify kvm_align_section()
  kvm: region_add and region_del is not called on updates
  kvm: fix error message when failing to unregister slot
  kvm: tolerate non-existing slot for log_start/log_stop/log_sync
  kvm: fix alignment of ram address
  memory: call log_start after region_add
  target/i386: trap on instructions longer than >15 bytes
  target/i386: introduce x86_ld*_code
  tco: add trace events
  docs/devel/loads-stores.rst: Document our various load and store APIs
  nios2: define tcg_env
  build: remove CONFIG_LIBDECNUMBER
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a8b392ac9a
@ -102,12 +102,6 @@ obj-y += target/$(TARGET_BASE_ARCH)/
|
||||
obj-y += disas.o
|
||||
obj-$(call notempty,$(TARGET_XML_FILES)) += gdbstub-xml.o
|
||||
|
||||
obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/decContext.o
|
||||
obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/decNumber.o
|
||||
obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/dpd/decimal32.o
|
||||
obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/dpd/decimal64.o
|
||||
obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/dpd/decimal128.o
|
||||
|
||||
#########################################################
|
||||
# Linux user emulator target
|
||||
|
||||
|
@ -197,26 +197,20 @@ static hwaddr kvm_align_section(MemoryRegionSection *section,
|
||||
hwaddr *start)
|
||||
{
|
||||
hwaddr size = int128_get64(section->size);
|
||||
hwaddr delta;
|
||||
|
||||
*start = section->offset_within_address_space;
|
||||
hwaddr delta, aligned;
|
||||
|
||||
/* kvm works in page size chunks, but the function may be called
|
||||
with sub-page size and unaligned start address. Pad the start
|
||||
address to next and truncate size to previous page boundary. */
|
||||
delta = qemu_real_host_page_size - (*start & ~qemu_real_host_page_mask);
|
||||
delta &= ~qemu_real_host_page_mask;
|
||||
*start += delta;
|
||||
aligned = ROUND_UP(section->offset_within_address_space,
|
||||
qemu_real_host_page_size);
|
||||
delta = aligned - section->offset_within_address_space;
|
||||
*start = aligned;
|
||||
if (delta > size) {
|
||||
return 0;
|
||||
}
|
||||
size -= delta;
|
||||
size &= qemu_real_host_page_mask;
|
||||
if (*start & ~qemu_real_host_page_mask) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return size;
|
||||
return (size - delta) & qemu_real_host_page_mask;
|
||||
}
|
||||
|
||||
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
|
||||
@ -394,8 +388,8 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
|
||||
|
||||
mem = kvm_lookup_matching_slot(kml, start_addr, size);
|
||||
if (!mem) {
|
||||
fprintf(stderr, "%s: error finding slot\n", __func__);
|
||||
abort();
|
||||
/* We don't have a slot if we want to trap every access. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
return kvm_slot_update_flags(kml, mem, section->mr);
|
||||
@ -470,8 +464,8 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
|
||||
if (size) {
|
||||
mem = kvm_lookup_matching_slot(kml, start_addr, size);
|
||||
if (!mem) {
|
||||
fprintf(stderr, "%s: error finding slot\n", __func__);
|
||||
abort();
|
||||
/* We don't have a slot if we want to trap every access. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* XXX bad kernel interface alert
|
||||
@ -717,11 +711,12 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
return;
|
||||
}
|
||||
|
||||
/* use aligned delta to align the ram address */
|
||||
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
|
||||
(section->offset_within_address_space - start_addr);
|
||||
(start_addr - section->offset_within_address_space);
|
||||
|
||||
mem = kvm_lookup_matching_slot(kml, start_addr, size);
|
||||
if (!add) {
|
||||
mem = kvm_lookup_matching_slot(kml, start_addr, size);
|
||||
if (!mem) {
|
||||
return;
|
||||
}
|
||||
@ -733,19 +728,13 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
mem->memory_size = 0;
|
||||
err = kvm_set_user_memory_region(kml, mem);
|
||||
if (err) {
|
||||
fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
|
||||
fprintf(stderr, "%s: error unregistering slot: %s\n",
|
||||
__func__, strerror(-err));
|
||||
abort();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (mem) {
|
||||
/* update the slot */
|
||||
kvm_slot_update_flags(kml, mem, mr);
|
||||
return;
|
||||
}
|
||||
|
||||
/* register the new slot */
|
||||
mem = kvm_alloc_slot(kml);
|
||||
mem->memory_size = size;
|
||||
|
@ -332,10 +332,6 @@ static void tcp_chr_free_connection(Chardev *chr)
|
||||
SocketChardev *s = SOCKET_CHARDEV(chr);
|
||||
int i;
|
||||
|
||||
if (!s->connected) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (s->read_msgfds_num) {
|
||||
for (i = 0; i < s->read_msgfds_num; i++) {
|
||||
close(s->read_msgfds[i]);
|
||||
@ -394,22 +390,25 @@ static void update_disconnected_filename(SocketChardev *s)
|
||||
s->is_listen, s->is_telnet);
|
||||
}
|
||||
|
||||
/* NB may be called even if tcp_chr_connect has not been
|
||||
* reached, due to TLS or telnet initialization failure,
|
||||
* so can *not* assume s->connected == true
|
||||
*/
|
||||
static void tcp_chr_disconnect(Chardev *chr)
|
||||
{
|
||||
SocketChardev *s = SOCKET_CHARDEV(chr);
|
||||
|
||||
if (!s->connected) {
|
||||
return;
|
||||
}
|
||||
bool emit_close = s->connected;
|
||||
|
||||
tcp_chr_free_connection(chr);
|
||||
|
||||
if (s->listen_ioc) {
|
||||
if (s->listen_ioc && s->listen_tag == 0) {
|
||||
s->listen_tag = qio_channel_add_watch(
|
||||
QIO_CHANNEL(s->listen_ioc), G_IO_IN, tcp_chr_accept, chr, NULL);
|
||||
}
|
||||
update_disconnected_filename(s);
|
||||
qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
|
||||
if (emit_close) {
|
||||
qemu_chr_be_event(chr, CHR_EVENT_CLOSED);
|
||||
}
|
||||
if (s->reconnect_time) {
|
||||
qemu_chr_socket_restart_timer(chr);
|
||||
}
|
||||
|
configure | 12 (vendored)
@ -3335,9 +3335,17 @@ if test "$mpath" != "no" ; then
|
||||
#include <mpath_persist.h>
|
||||
unsigned mpath_mx_alloc_len = 1024;
|
||||
int logsink;
|
||||
static struct config *multipath_conf;
|
||||
extern struct udev *udev;
|
||||
extern struct config *get_multipath_config(void);
|
||||
extern void put_multipath_config(struct config *conf);
|
||||
struct udev *udev;
|
||||
struct config *get_multipath_config(void) { return multipath_conf; }
|
||||
void put_multipath_config(struct config *conf) { }
|
||||
|
||||
int main(void) {
|
||||
struct udev *udev = udev_new();
|
||||
mpath_lib_init(udev);
|
||||
udev = udev_new();
|
||||
multipath_conf = mpath_lib_init();
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
|
@ -1,2 +1 @@
|
||||
# Default configuration for ppc-linux-user
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
|
@ -46,7 +46,6 @@ CONFIG_E500=y
|
||||
CONFIG_OPENPIC_KVM=$(call land,$(CONFIG_E500),$(CONFIG_KVM))
|
||||
CONFIG_PLATFORM_BUS=y
|
||||
CONFIG_ETSEC=y
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
CONFIG_SM501=y
|
||||
# For PReP
|
||||
CONFIG_SERIAL_ISA=y
|
||||
|
@ -1,2 +1 @@
|
||||
# Default configuration for ppc64-linux-user
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
|
@ -51,7 +51,6 @@ CONFIG_E500=y
|
||||
CONFIG_OPENPIC_KVM=$(call land,$(CONFIG_E500),$(CONFIG_KVM))
|
||||
CONFIG_PLATFORM_BUS=y
|
||||
CONFIG_ETSEC=y
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
CONFIG_SM501=y
|
||||
# For pSeries
|
||||
CONFIG_XICS=$(CONFIG_PSERIES)
|
||||
|
@ -1,2 +1 @@
|
||||
# Default configuration for ppc64abi32-linux-user
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
|
@ -1,2 +1 @@
|
||||
# Default configuration for ppc64le-linux-user
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
|
@ -15,5 +15,4 @@ CONFIG_PTIMER=y
|
||||
CONFIG_I8259=y
|
||||
CONFIG_XILINX=y
|
||||
CONFIG_XILINX_ETHLITE=y
|
||||
CONFIG_LIBDECNUMBER=y
|
||||
CONFIG_SM501=y
|
||||
|
disas.c | 1
@ -190,7 +190,6 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code,
|
||||
|
||||
s.cpu = cpu;
|
||||
s.info.read_memory_func = target_read_memory;
|
||||
s.info.read_memory_inner_func = NULL;
|
||||
s.info.buffer_vma = code;
|
||||
s.info.buffer_length = size;
|
||||
s.info.print_address_func = generic_print_address;
|
||||
|
docs/devel/loads-stores.rst | 396 (new file)
@ -0,0 +1,396 @@
|
||||
..
|
||||
Copyright (c) 2017 Linaro Limited
|
||||
Written by Peter Maydell
|
||||
|
||||
===================
|
||||
Load and Store APIs
|
||||
===================
|
||||
|
||||
QEMU internally has multiple families of functions for performing
|
||||
loads and stores. This document attempts to enumerate them all
|
||||
and indicate when to use them. It does not provide detailed
|
||||
documentation of each API -- for that you should look at the
|
||||
documentation comments in the relevant header files.
|
||||
|
||||
|
||||
``ld*_p and st*_p``
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These functions operate on a host pointer, and should be used
|
||||
when you already have a pointer into host memory (corresponding
|
||||
to guest ram or a local buffer). They deal with doing accesses
|
||||
with the desired endianness and with correctly handling
|
||||
potentially unaligned pointer values.
|
||||
|
||||
Function names follow the pattern:
|
||||
|
||||
load: ``ld{type}{sign}{size}_{endian}_p(ptr)``
|
||||
|
||||
store: ``st{type}{size}_{endian}_p(ptr, val)``
|
||||
|
||||
``type``
|
||||
- (empty) : integer access
|
||||
- ``f`` : float access
|
||||
|
||||
``sign``
|
||||
- (empty) : for 32 or 64 bit sizes (including floats and doubles)
|
||||
- ``u`` : unsigned
|
||||
- ``s`` : signed
|
||||
|
||||
``size``
|
||||
- ``b`` : 8 bits
|
||||
- ``w`` : 16 bits
|
||||
- ``l`` : 32 bits
|
||||
- ``q`` : 64 bits
|
||||
|
||||
``endian``
|
||||
- ``he`` : host endian
|
||||
- ``be`` : big endian
|
||||
- ``le`` : little endian
|
||||
|
||||
The ``_{endian}`` infix is omitted for target-endian accesses.
|
||||
|
||||
The target endian accessors are only available to source
|
||||
files which are built per-target.
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<ldf\?[us]\?[bwlq]\(_[hbl]e\)\?_p\>``
|
||||
- ``\<stf\?[bwlq]\(_[hbl]e\)\?_p\>``
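As an illustration of the naming pattern above, here is a minimal sketch (not
part of this patch) using the host-pointer accessors declared in
``qemu/bswap.h``; the buffer and values are made up::

    #include "qemu/osdep.h"
    #include "qemu/bswap.h"

    static void host_ptr_example(void)
    {
        uint8_t buf[16] = { 0 };

        /* 64-bit big-endian store at a potentially unaligned offset */
        stq_be_p(buf + 1, 0x1122334455667788ULL);

        /* 32-bit little-endian load from the start of the buffer */
        uint32_t val = ldl_le_p(buf);
        (void)val;
    }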
|
||||
|
||||
``cpu_{ld,st}_*``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
These functions operate on a guest virtual address. Be aware
|
||||
that these functions may cause a guest CPU exception to be
|
||||
taken (e.g. for an alignment fault or MMU fault) which will
|
||||
result in guest CPU state being updated and control longjumping
|
||||
out of the function call. They should therefore only be used
|
||||
in code that is implementing emulation of the target CPU.
|
||||
|
||||
These functions may throw an exception (longjmp() back out
|
||||
to the top level TCG loop). This means they must only be used
|
||||
from helper functions where the translator has saved all
|
||||
necessary CPU state before generating the helper function call.
|
||||
It's usually better to use the ``_ra`` variants described below
|
||||
from helper functions, but these functions are the right choice
|
||||
for calls made from hooks like the CPU do_interrupt hook or
|
||||
when you know for certain that the translator had to save all
|
||||
the CPU state that ``cpu_restore_state()`` would restore anyway.
|
||||
|
||||
Function names follow the pattern:
|
||||
|
||||
load: ``cpu_ld{sign}{size}_{mmusuffix}(env, ptr)``
|
||||
|
||||
store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
|
||||
|
||||
``sign``
|
||||
- (empty) : for 32 or 64 bit sizes
|
||||
- ``u`` : unsigned
|
||||
- ``s`` : signed
|
||||
|
||||
``size``
|
||||
- ``b`` : 8 bits
|
||||
- ``w`` : 16 bits
|
||||
- ``l`` : 32 bits
|
||||
- ``q`` : 64 bits
|
||||
|
||||
``mmusuffix`` is one of the generic suffixes ``data`` or ``code``, or
|
||||
(for softmmu configs) a target-specific MMU mode suffix as defined
|
||||
in the target's ``cpu.h``.
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+\>``
|
||||
- ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+\>``
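A hypothetical target helper using these accessors might look like the sketch
below (``data`` is the generic MMU-mode suffix; the helper name is made up and
assumed to be declared via ``DEF_HELPER`` in the target's ``helper.h``)::

    #include "qemu/osdep.h"
    #include "cpu.h"
    #include "exec/helper-proto.h"
    #include "exec/cpu_ldst.h"

    /* May longjmp out on a guest MMU fault, so this must only be called
     * where the translator has already saved the necessary CPU state.
     */
    uint32_t HELPER(example_load)(CPUArchState *env, target_ulong addr)
    {
        return cpu_ldl_data(env, addr);
    }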
|
||||
|
||||
``cpu_{ld,st}_*_ra``
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These functions work like the ``cpu_{ld,st}_*`` functions except
|
||||
that they also take a ``retaddr`` argument. This extra argument
|
||||
allows for correct unwinding of any exception that is taken,
|
||||
and should generally be the result of GETPC() called directly
|
||||
from the top level HELPER(foo) function (i.e. the return address
|
||||
in the generated code).
|
||||
|
||||
These are generally the preferred way to do accesses by guest
|
||||
virtual address from helper functions; see the documentation
|
||||
of the non-``_ra`` variants for when those would be better.
|
||||
|
||||
Calling these functions with a ``retaddr`` argument of 0 is
|
||||
equivalent to calling the non-``_ra`` version of the function.
|
||||
|
||||
Function names follow the pattern:
|
||||
|
||||
load: ``cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)``
|
||||
|
||||
store: ``cpu_st{sign}{size}_{mmusuffix}_ra(env, ptr, val, retaddr)``
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+_ra\>``
|
||||
- ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+_ra\>``
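For example, a hypothetical helper that wants faults to unwind precisely to
the guest instruction would pass ``GETPC()`` straight through (helper name
made up)::

    uint32_t HELPER(example_load_ra)(CPUArchState *env, target_ulong addr)
    {
        /* GETPC() must be evaluated directly in the top-level HELPER() */
        uintptr_t ra = GETPC();

        return cpu_ldl_data_ra(env, addr, ra);
    }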
|
||||
|
||||
``helper_*_{ld,st}*mmu``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These functions are intended primarily to be called by the code
|
||||
generated by the TCG backend. They may also be called by target
|
||||
CPU helper function code. Like the ``cpu_{ld,st}_*_ra`` functions
|
||||
they perform accesses by guest virtual address; the difference is
|
||||
that these functions allow you to specify an ``opindex`` parameter
|
||||
which encodes (among other things) the mmu index to use for the
|
||||
access. This is necessary if your helper needs to make an access
|
||||
via a specific mmu index (for instance, an "always as non-privileged"
|
||||
access) rather than using the default mmu index for the current state
|
||||
of the guest CPU.
|
||||
|
||||
The ``opindex`` parameter should be created by calling ``make_memop_idx()``.
|
||||
|
||||
The ``retaddr`` parameter should be the result of GETPC() called directly
|
||||
from the top level HELPER(foo) function (or 0 if no guest CPU state
|
||||
unwinding is required).
|
||||
|
||||
**TODO** The names of these functions are a bit odd for historical
|
||||
reasons because they were originally expected to be called only from
|
||||
within generated code. We should rename them to bring them
|
||||
more in line with the other memory access functions.
|
||||
|
||||
load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)``
|
||||
|
||||
load (code): ``helper_{endian}_ld{sign}{size}_cmmu(env, addr, opindex, retaddr)``
|
||||
|
||||
store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``
|
||||
|
||||
``sign``
|
||||
- (empty) : for 32 or 64 bit sizes
|
||||
- ``u`` : unsigned
|
||||
- ``s`` : signed
|
||||
|
||||
``size``
|
||||
- ``b`` : 8 bits
|
||||
- ``w`` : 16 bits
|
||||
- ``l`` : 32 bits
|
||||
- ``q`` : 64 bits
|
||||
|
||||
``endian``
|
||||
- ``le`` : little endian
|
||||
- ``be`` : big endian
|
||||
- ``ret`` : target endianness
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_c\?mmu\>``
|
||||
- ``\<helper_\(le\|be\|ret\)_st[bwlq]_mmu\>``
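A sketch of an access made through an explicit mmu index (the index, wrapper
name and calling context are assumptions, not from this patch), using the
2.10-era ``TCGMemOpIdx``/``make_memop_idx()`` interface::

    static uint32_t example_load_as_user(CPUArchState *env, target_ulong addr,
                                         int user_mmu_idx, uintptr_t retaddr)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_LEUL, user_mmu_idx);

        /* 32-bit little-endian load via the given mmu index */
        return helper_le_ldul_mmu(env, addr, oi, retaddr);
    }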
|
||||
|
||||
``address_space_*``
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These functions are the primary ones to use when emulating CPU
|
||||
or device memory accesses. They take an AddressSpace, which is the
|
||||
way QEMU defines the view of memory that a device or CPU has.
|
||||
(They generally correspond to being the "master" end of a hardware bus
|
||||
or bus fabric.)
|
||||
|
||||
Each CPU has an AddressSpace. Some kinds of CPU have more than
|
||||
one AddressSpace (for instance ARM guest CPUs have an AddressSpace
|
||||
for the Secure world and one for NonSecure if they implement TrustZone).
|
||||
Devices which can do DMA-type operations should generally have an
|
||||
AddressSpace. There is also a "system address space" which typically
|
||||
has all the devices and memory that all CPUs can see. (Some older
|
||||
device models use the "system address space" rather than properly
|
||||
modelling that they have an AddressSpace of their own.)
|
||||
|
||||
Functions are provided for doing byte-buffer reads and writes,
|
||||
and also for doing one-data-item loads and stores.
|
||||
|
||||
In all cases the caller provides a MemTxAttrs to specify bus
|
||||
transaction attributes, and can check whether the memory transaction
|
||||
succeeded using a MemTxResult return code.
|
||||
|
||||
``address_space_read(address_space, addr, attrs, buf, len)``
|
||||
|
||||
``address_space_write(address_space, addr, attrs, buf, len)``
|
||||
|
||||
``address_space_rw(address_space, addr, attrs, buf, len, is_write)``
|
||||
|
||||
``address_space_ld{sign}{size}_{endian}(address_space, addr, attrs, txresult)``
|
||||
|
||||
``address_space_st{size}_{endian}(address_space, addr, val, attrs, txresult)``
|
||||
|
||||
``sign``
|
||||
- (empty) : for 32 or 64 bit sizes
|
||||
- ``u`` : unsigned
|
||||
|
||||
(No signed load operations are provided.)
|
||||
|
||||
``size``
|
||||
- ``b`` : 8 bits
|
||||
- ``w`` : 16 bits
|
||||
- ``l`` : 32 bits
|
||||
- ``q`` : 64 bits
|
||||
|
||||
``endian``
|
||||
- ``le`` : little endian
|
||||
- ``be`` : big endian
|
||||
|
||||
The ``_{endian}`` suffix is omitted for byte accesses.
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<address_space_\(read\|write\|rw\)\>``
|
||||
- ``\<address_space_ldu\?[bwql]\(_[lb]e\)\?\>``
|
||||
- ``\<address_space_st[bwql]\(_[lb]e\)\?\>``
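A minimal sketch of a device-style read, using the system address space only
for brevity (a real device would normally use its own AddressSpace; the
function name is made up)::

    #include "qemu/osdep.h"
    #include "exec/memory.h"
    #include "exec/address-spaces.h"

    static uint32_t example_read_reg(hwaddr addr)
    {
        MemTxResult res;
        uint32_t val;

        val = address_space_ldl_le(&address_space_memory, addr,
                                   MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            /* handle or report the failed transaction */
        }
        return val;
    }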
|
||||
|
||||
``{ld,st}*_phys``
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
These are functions which are identical to
|
||||
``address_space_{ld,st}*``, except that they always pass
|
||||
``MEMTXATTRS_UNSPECIFIED`` for the transaction attributes, and ignore
|
||||
whether the transaction succeeded or failed.
|
||||
|
||||
The fact that they ignore whether the transaction succeeded means
|
||||
they should not be used in new code, unless you know for certain
|
||||
that your code will only be used in a context where the CPU or
|
||||
device doing the access has no way to report such an error.
|
||||
|
||||
``load: ld{sign}{size}_{endian}_phys``
|
||||
|
||||
``store: st{size}_{endian}_phys``
|
||||
|
||||
``sign``
|
||||
- (empty) : for 32 or 64 bit sizes
|
||||
- ``u`` : unsigned
|
||||
|
||||
(No signed load operations are provided.)
|
||||
|
||||
``size``
|
||||
- ``b`` : 8 bits
|
||||
- ``w`` : 16 bits
|
||||
- ``l`` : 32 bits
|
||||
- ``q`` : 64 bits
|
||||
|
||||
``endian``
|
||||
- ``le`` : little endian
|
||||
- ``be`` : big endian
|
||||
|
||||
The ``_{endian}_`` infix is omitted for byte accesses.
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<ldu\?[bwlq]\(_[bl]e\)\?_phys\>``
|
||||
- ``\<st[bwlq]\(_[bl]e\)\?_phys\>``
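For comparison, the ``*_phys`` form of the same read simply discards the
transaction result, which is why it is discouraged for new code (sketch, not
from the patch)::

    static uint32_t example_read_reg_legacy(hwaddr addr)
    {
        /* equivalent to address_space_ldl_le() with attrs/result ignored */
        return ldl_le_phys(&address_space_memory, addr);
    }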
|
||||
|
||||
``cpu_physical_memory_*``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These are convenience functions which are identical to
|
||||
``address_space_*`` but operate specifically on the system address space,
|
||||
always pass a ``MEMTXATTRS_UNSPECIFIED`` set of memory attributes and
|
||||
ignore whether the memory transaction succeeded or failed.
|
||||
For new code they are better avoided:
|
||||
|
||||
* there is likely to be behaviour you need to model correctly for a
|
||||
failed read or write operation
|
||||
* a device should usually perform operations on its own AddressSpace
|
||||
rather than using the system address space
|
||||
|
||||
``cpu_physical_memory_read``
|
||||
|
||||
``cpu_physical_memory_write``
|
||||
|
||||
``cpu_physical_memory_rw``
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<cpu_physical_memory_\(read\|write\|rw\)\>``
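A short sketch of the byte-buffer convenience form (the address and buffer
size are made up)::

    static void example_copy(hwaddr addr)
    {
        uint8_t buf[64];

        /* system address space, transaction errors silently ignored */
        cpu_physical_memory_read(addr, buf, sizeof(buf));
        /* ... inspect or modify buf ... */
        cpu_physical_memory_write(addr, buf, sizeof(buf));
    }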
|
||||
|
||||
``cpu_physical_memory_write_rom``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This function performs a write by physical address like
|
||||
``address_space_write``, except that if the write is to a ROM then
|
||||
the ROM contents will be modified, even though a write by the guest
|
||||
CPU to the ROM would be ignored.
|
||||
|
||||
Note that unlike ``cpu_physical_memory_write()`` this function takes
|
||||
an AddressSpace argument, but unlike ``address_space_write()`` this
|
||||
function does not take a ``MemTxAttrs`` or return a ``MemTxResult``.
|
||||
|
||||
**TODO**: we should probably clean up this inconsistency and
|
||||
turn the function into ``address_space_write_rom`` with an API
|
||||
matching ``address_space_write``.
|
||||
|
||||
``cpu_physical_memory_write_rom``
|
||||
|
||||
|
||||
``cpu_memory_rw_debug``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Access CPU memory by virtual address for debug purposes.
|
||||
|
||||
This function is intended for use by the GDB stub and similar code.
|
||||
It takes a virtual address, converts it to a physical address via
|
||||
an MMU lookup using the current settings of the specified CPU,
|
||||
and then performs the access (using ``address_space_rw`` for
|
||||
reads or ``cpu_physical_memory_write_rom`` for writes).
|
||||
This means that if the access is a write to a ROM then this
|
||||
function will modify the contents (whereas a normal guest CPU access
|
||||
would ignore the write attempt).
|
||||
|
||||
``cpu_memory_rw_debug``
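A sketch of a debug-style read of guest virtual memory, as the GDB stub or a
monitor command might do it (function name made up)::

    static void example_dump_word(CPUState *cs, target_ulong vaddr)
    {
        uint8_t buf[4];

        if (cpu_memory_rw_debug(cs, vaddr, buf, sizeof(buf), 0) == 0) {
            /* buf now holds the bytes at vaddr, translated with the
             * CPU's current MMU settings */
        }
    }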
|
||||
|
||||
``dma_memory_*``
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
These behave like ``address_space_*``, except that they perform a DMA
|
||||
barrier operation first.
|
||||
|
||||
**TODO**: We should provide guidance on when you need the DMA
|
||||
barrier operation and when it's OK to use ``address_space_*``, and
|
||||
make sure our existing code is doing things correctly.
|
||||
|
||||
``dma_memory_read``
|
||||
|
||||
``dma_memory_write``
|
||||
|
||||
``dma_memory_rw``
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<dma_memory_\(read\|write\|rw\)\>``
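A sketch of a DMA-barriered read into a scratch buffer (the wrapper name is an
assumption)::

    #include "sysemu/dma.h"

    static int example_dma_fetch(AddressSpace *as, dma_addr_t addr,
                                 void *buf, dma_addr_t len)
    {
        /* like address_space_read(), but with a DMA barrier first */
        return dma_memory_read(as, addr, buf, len);
    }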
|
||||
|
||||
``pci_dma_*`` and ``{ld,st}*_pci_dma``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These functions are specifically for PCI device models which need to
|
||||
perform accesses where the PCI device is a bus master. You pass them a
|
||||
``PCIDevice *`` and they will do ``dma_memory_*`` operations on the
|
||||
correct address space for that device.
|
||||
|
||||
``pci_dma_read``
|
||||
|
||||
``pci_dma_write``
|
||||
|
||||
``pci_dma_rw``
|
||||
|
||||
``load: ld{sign}{size}_{endian}_pci_dma``
|
||||
|
||||
``store: st{size}_{endian}_pci_dma``
|
||||
|
||||
``sign``
|
||||
- (empty) : for 32 or 64 bit sizes
|
||||
- ``u`` : unsigned
|
||||
|
||||
(No signed load operations are provided.)
|
||||
|
||||
``size``
|
||||
- ``b`` : 8 bits
|
||||
- ``w`` : 16 bits
|
||||
- ``l`` : 32 bits
|
||||
- ``q`` : 64 bits
|
||||
|
||||
``endian``
|
||||
- ``le`` : little endian
|
||||
- ``be`` : big endian
|
||||
|
||||
The ``_{endian}_`` infix is omitted for byte accesses.
|
||||
|
||||
Regexes for git grep
|
||||
- ``\<pci_dma_\(read\|write\|rw\)\>``
|
||||
- ``\<ldu\?[bwlq]\(_[bl]e\)\?_pci_dma\>``
|
||||
- ``\<st[bwlq]\(_[bl]e\)\?_pci_dma\>``
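A hypothetical bus-master access from a PCI device model (the descriptor
layout and names are made up)::

    #include "hw/pci/pci.h"

    static uint32_t example_read_desc_flags(PCIDevice *dev,
                                            dma_addr_t desc_addr)
    {
        uint8_t desc[16];

        /* buffer read through the device's DMA address space */
        pci_dma_read(dev, desc_addr, desc, sizeof(desc));

        /* single little-endian 32-bit load from within the descriptor */
        return ldl_le_pci_dma(dev, desc_addr + 8);
    }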
|
exec.c | 109
@ -465,11 +465,29 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
|
||||
return section;
|
||||
}
|
||||
|
||||
/* Called from RCU critical section */
|
||||
/**
|
||||
* flatview_do_translate - translate an address in FlatView
|
||||
*
|
||||
* @fv: the flat view that we want to translate on
|
||||
* @addr: the address to be translated in above address space
|
||||
* @xlat: the translated address offset within memory region. It
|
||||
* cannot be @NULL.
|
||||
* @plen_out: valid read/write length of the translated address. It
|
||||
* can be @NULL when we don't care about it.
|
||||
* @page_mask_out: page mask for the translated address. This
|
||||
* should only be meaningful for IOMMU translated
|
||||
* addresses, since there may be huge pages that this bit
|
||||
* would tell. It can be @NULL if we don't care about it.
|
||||
* @is_write: whether the translation operation is for write
|
||||
* @is_mmio: whether this can be MMIO, set true if it can
|
||||
*
|
||||
* This function is called from RCU critical section
|
||||
*/
|
||||
static MemoryRegionSection flatview_do_translate(FlatView *fv,
|
||||
hwaddr addr,
|
||||
hwaddr *xlat,
|
||||
hwaddr *plen,
|
||||
hwaddr *plen_out,
|
||||
hwaddr *page_mask_out,
|
||||
bool is_write,
|
||||
bool is_mmio,
|
||||
AddressSpace **target_as)
|
||||
@ -478,11 +496,17 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
|
||||
MemoryRegionSection *section;
|
||||
IOMMUMemoryRegion *iommu_mr;
|
||||
IOMMUMemoryRegionClass *imrc;
|
||||
hwaddr page_mask = (hwaddr)(-1);
|
||||
hwaddr plen = (hwaddr)(-1);
|
||||
|
||||
if (plen_out) {
|
||||
plen = *plen_out;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
section = address_space_translate_internal(
|
||||
flatview_to_dispatch(fv), addr, &addr,
|
||||
plen, is_mmio);
|
||||
&plen, is_mmio);
|
||||
|
||||
iommu_mr = memory_region_get_iommu(section->mr);
|
||||
if (!iommu_mr) {
|
||||
@ -494,7 +518,8 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
|
||||
IOMMU_WO : IOMMU_RO);
|
||||
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
|
||||
| (addr & iotlb.addr_mask));
|
||||
*plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
|
||||
page_mask &= iotlb.addr_mask;
|
||||
plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1);
|
||||
if (!(iotlb.perm & (1 << is_write))) {
|
||||
goto translate_fail;
|
||||
}
|
||||
@ -505,6 +530,19 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
|
||||
|
||||
*xlat = addr;
|
||||
|
||||
if (page_mask == (hwaddr)(-1)) {
|
||||
/* Not behind an IOMMU, use default page size. */
|
||||
page_mask = ~TARGET_PAGE_MASK;
|
||||
}
|
||||
|
||||
if (page_mask_out) {
|
||||
*page_mask_out = page_mask;
|
||||
}
|
||||
|
||||
if (plen_out) {
|
||||
*plen_out = plen;
|
||||
}
|
||||
|
||||
return *section;
|
||||
|
||||
translate_fail:
|
||||
@ -516,14 +554,14 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
|
||||
bool is_write)
|
||||
{
|
||||
MemoryRegionSection section;
|
||||
hwaddr xlat, plen;
|
||||
hwaddr xlat, page_mask;
|
||||
|
||||
/* Try to get maximum page mask during translation. */
|
||||
plen = (hwaddr)-1;
|
||||
|
||||
/* This can never be MMIO. */
|
||||
section = flatview_do_translate(address_space_to_flatview(as), addr,
|
||||
&xlat, &plen, is_write, false, &as);
|
||||
/*
|
||||
* This can never be MMIO, and we don't really care about plen,
|
||||
* but page mask.
|
||||
*/
|
||||
section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
|
||||
NULL, &page_mask, is_write, false, &as);
|
||||
|
||||
/* Illegal translation */
|
||||
if (section.mr == &io_mem_unassigned) {
|
||||
@ -534,22 +572,11 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
|
||||
xlat += section.offset_within_address_space -
|
||||
section.offset_within_region;
|
||||
|
||||
if (plen == (hwaddr)-1) {
|
||||
/*
|
||||
* We use default page size here. Logically it only happens
|
||||
* for identity mappings.
|
||||
*/
|
||||
plen = TARGET_PAGE_SIZE;
|
||||
}
|
||||
|
||||
/* Convert to address mask */
|
||||
plen -= 1;
|
||||
|
||||
return (IOMMUTLBEntry) {
|
||||
.target_as = as,
|
||||
.iova = addr & ~plen,
|
||||
.translated_addr = xlat & ~plen,
|
||||
.addr_mask = plen,
|
||||
.iova = addr & ~page_mask,
|
||||
.translated_addr = xlat & ~page_mask,
|
||||
.addr_mask = page_mask,
|
||||
/* IOTLBs are for DMAs, and DMA only allows on RAMs. */
|
||||
.perm = IOMMU_RW,
|
||||
};
|
||||
@ -567,7 +594,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
|
||||
AddressSpace *as = NULL;
|
||||
|
||||
/* This can be MMIO, so setup MMIO bit. */
|
||||
section = flatview_do_translate(fv, addr, xlat, plen, is_write, true, &as);
|
||||
section = flatview_do_translate(fv, addr, xlat, plen, NULL,
|
||||
is_write, true, &as);
|
||||
mr = section.mr;
|
||||
|
||||
if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
|
||||
@ -2348,6 +2376,9 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
|
||||
case 4:
|
||||
stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
|
||||
break;
|
||||
case 8:
|
||||
stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
|
||||
break;
|
||||
default:
|
||||
abort();
|
||||
}
|
||||
@ -2378,6 +2409,16 @@ static const MemoryRegionOps notdirty_mem_ops = {
|
||||
.write = notdirty_mem_write,
|
||||
.valid.accepts = notdirty_mem_accepts,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.valid = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 8,
|
||||
.unaligned = false,
|
||||
},
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 8,
|
||||
.unaligned = false,
|
||||
},
|
||||
};
|
||||
|
||||
/* Generate a debug exception if a watchpoint has been hit. */
|
||||
@ -2462,6 +2503,9 @@ static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
|
||||
case 4:
|
||||
data = address_space_ldl(as, addr, attrs, &res);
|
||||
break;
|
||||
case 8:
|
||||
data = address_space_ldq(as, addr, attrs, &res);
|
||||
break;
|
||||
default: abort();
|
||||
}
|
||||
*pdata = data;
|
||||
@ -2487,6 +2531,9 @@ static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
|
||||
case 4:
|
||||
address_space_stl(as, addr, val, attrs, &res);
|
||||
break;
|
||||
case 8:
|
||||
address_space_stq(as, addr, val, attrs, &res);
|
||||
break;
|
||||
default: abort();
|
||||
}
|
||||
return res;
|
||||
@ -2496,6 +2543,16 @@ static const MemoryRegionOps watch_mem_ops = {
|
||||
.read_with_attrs = watch_mem_read,
|
||||
.write_with_attrs = watch_mem_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.valid = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 8,
|
||||
.unaligned = false,
|
||||
},
|
||||
.impl = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 8,
|
||||
.unaligned = false,
|
||||
},
|
||||
};
|
||||
|
||||
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include "hw/i386/ich9.h"
|
||||
|
||||
#include "hw/acpi/tco.h"
|
||||
#include "trace.h"
|
||||
|
||||
//#define DEBUG
|
||||
|
||||
@ -41,8 +42,11 @@ enum {
|
||||
|
||||
static inline void tco_timer_reload(TCOIORegs *tr)
|
||||
{
|
||||
tr->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
((int64_t)(tr->tco.tmr & TCO_TMR_MASK) * TCO_TICK_NSEC);
|
||||
int ticks = tr->tco.tmr & TCO_TMR_MASK;
|
||||
int64_t nsec = (int64_t)ticks * TCO_TICK_NSEC;
|
||||
|
||||
trace_tco_timer_reload(ticks, nsec / 1000000);
|
||||
tr->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + nsec;
|
||||
timer_mod(tr->tco_timer, tr->expire_time);
|
||||
}
|
||||
|
||||
@ -59,6 +63,9 @@ static void tco_timer_expired(void *opaque)
|
||||
ICH9LPCState *lpc = container_of(pm, ICH9LPCState, pm);
|
||||
uint32_t gcs = pci_get_long(lpc->chip_config + ICH9_CC_GCS);
|
||||
|
||||
trace_tco_timer_expired(tr->timeouts_no,
|
||||
lpc->pin_strap.spkr_hi,
|
||||
!!(gcs & ICH9_CC_GCS_NO_REBOOT));
|
||||
tr->tco.rld = 0;
|
||||
tr->tco.sts1 |= TCO_TIMEOUT;
|
||||
if (++tr->timeouts_no == 2) {
|
||||
|
@ -30,3 +30,7 @@ cpuhp_acpi_ejecting_invalid_cpu(uint32_t idx) "0x%"PRIx32
|
||||
cpuhp_acpi_ejecting_cpu(uint32_t idx) "0x%"PRIx32
|
||||
cpuhp_acpi_write_ost_ev(uint32_t slot, uint32_t ev) "idx[0x%"PRIx32"] OST EVENT: 0x%"PRIx32
|
||||
cpuhp_acpi_write_ost_status(uint32_t slot, uint32_t st) "idx[0x%"PRIx32"] OST STATUS: 0x%"PRIx32
|
||||
|
||||
# hw/acpi/tco.c
|
||||
tco_timer_reload(int ticks, int msec) "ticks=%d (%d ms)"
|
||||
tco_timer_expired(int timeouts_no, bool strap, bool no_reboot) "timeouts_no=%d no_reboot=%d/%d"
|
||||
|
@ -928,6 +928,13 @@ static void device_set_realized(Object *obj, bool value, Error **errp)
|
||||
goto post_realize_fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* always free/re-initialize here since the value cannot be cleaned up
|
||||
* in device_unrealize due to its usage later on in the unplug path
|
||||
*/
|
||||
g_free(dev->canonical_path);
|
||||
dev->canonical_path = object_get_canonical_path(OBJECT(dev));
|
||||
|
||||
if (qdev_get_vmsd(dev)) {
|
||||
if (vmstate_register_with_alias_id(dev, -1, qdev_get_vmsd(dev), dev,
|
||||
dev->instance_id_alias,
|
||||
@ -984,6 +991,8 @@ child_realize_fail:
|
||||
}
|
||||
|
||||
post_realize_fail:
|
||||
g_free(dev->canonical_path);
|
||||
dev->canonical_path = NULL;
|
||||
if (dc->unrealize) {
|
||||
dc->unrealize(dev, NULL);
|
||||
}
|
||||
@ -1070,6 +1079,18 @@ static void device_finalize(Object *obj)
|
||||
* here
|
||||
*/
|
||||
}
|
||||
|
||||
/* Only send event if the device had been completely realized */
|
||||
if (dev->pending_deleted_event) {
|
||||
g_assert(dev->canonical_path);
|
||||
|
||||
qapi_event_send_device_deleted(!!dev->id, dev->id, dev->canonical_path,
|
||||
&error_abort);
|
||||
g_free(dev->canonical_path);
|
||||
dev->canonical_path = NULL;
|
||||
}
|
||||
|
||||
qemu_opts_del(dev->opts);
|
||||
}
|
||||
|
||||
static void device_class_base_init(ObjectClass *class, void *data)
|
||||
@ -1099,17 +1120,6 @@ static void device_unparent(Object *obj)
|
||||
object_unref(OBJECT(dev->parent_bus));
|
||||
dev->parent_bus = NULL;
|
||||
}
|
||||
|
||||
/* Only send event if the device had been completely realized */
|
||||
if (dev->pending_deleted_event) {
|
||||
gchar *path = object_get_canonical_path(OBJECT(dev));
|
||||
|
||||
qapi_event_send_device_deleted(!!dev->id, dev->id, path, &error_abort);
|
||||
g_free(path);
|
||||
}
|
||||
|
||||
qemu_opts_del(dev->opts);
|
||||
dev->opts = NULL;
|
||||
}
|
||||
|
||||
static void device_class_init(ObjectClass *class, void *data)
|
||||
|
@ -1876,8 +1876,15 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
|
||||
CPUArchId *cpu_slot;
|
||||
X86CPUTopoInfo topo;
|
||||
X86CPU *cpu = X86_CPU(dev);
|
||||
MachineState *ms = MACHINE(hotplug_dev);
|
||||
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
|
||||
|
||||
if(!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
|
||||
error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
|
||||
ms->cpu_type);
|
||||
return;
|
||||
}
|
||||
|
||||
/* if APIC ID is not set, set it based on socket/core/thread properties */
|
||||
if (cpu->apic_id == UNASSIGNED_APIC_ID) {
|
||||
int max_socket = (max_cpus - 1) / smp_threads / smp_cores;
|
||||
|
@ -208,6 +208,7 @@ static void ide_identify(IDEState *s)
|
||||
if (dev && dev->conf.discard_granularity) {
|
||||
put_le16(p + 169, 1); /* TRIM support */
|
||||
}
|
||||
put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
|
||||
|
||||
ide_identify_size(s);
|
||||
s->identify_set = 1;
|
||||
|
@ -299,6 +299,7 @@ static Property ide_hd_properties[] = {
|
||||
DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf),
|
||||
DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans",
|
||||
IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO),
|
||||
DEFINE_PROP_UINT16("rotation_rate", IDEDrive, dev.rotation_rate, 0),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
|
@ -104,6 +104,14 @@ typedef struct SCSIDiskState
|
||||
char *product;
|
||||
bool tray_open;
|
||||
bool tray_locked;
|
||||
/*
|
||||
* 0x0000 - rotation rate not reported
|
||||
* 0x0001 - non-rotating medium (SSD)
|
||||
* 0x0002-0x0400 - reserved
|
||||
* 0x0401-0xfffe - rotations per minute
|
||||
* 0xffff - reserved
|
||||
*/
|
||||
uint16_t rotation_rate;
|
||||
} SCSIDiskState;
|
||||
|
||||
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
|
||||
@ -605,6 +613,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
|
||||
outbuf[buflen++] = 0x83; // device identification
|
||||
if (s->qdev.type == TYPE_DISK) {
|
||||
outbuf[buflen++] = 0xb0; // block limits
|
||||
outbuf[buflen++] = 0xb1; /* block device characteristics */
|
||||
outbuf[buflen++] = 0xb2; // thin provisioning
|
||||
}
|
||||
break;
|
||||
@ -747,6 +756,15 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
|
||||
outbuf[43] = max_io_sectors & 0xff;
|
||||
break;
|
||||
}
|
||||
case 0xb1: /* block device characteristics */
|
||||
{
|
||||
buflen = 8;
|
||||
outbuf[4] = (s->rotation_rate >> 8) & 0xff;
|
||||
outbuf[5] = s->rotation_rate & 0xff;
|
||||
outbuf[6] = 0;
|
||||
outbuf[7] = 0;
|
||||
break;
|
||||
}
|
||||
case 0xb2: /* thin provisioning */
|
||||
{
|
||||
buflen = 8;
|
||||
@ -2329,6 +2347,14 @@ static void scsi_realize(SCSIDevice *dev, Error **errp)
|
||||
|
||||
blkconf_serial(&s->qdev.conf, &s->serial);
|
||||
blkconf_blocksizes(&s->qdev.conf);
|
||||
|
||||
if (s->qdev.conf.logical_block_size >
|
||||
s->qdev.conf.physical_block_size) {
|
||||
error_setg(errp,
|
||||
"logical_block_size > physical_block_size not supported");
|
||||
return;
|
||||
}
|
||||
|
||||
if (dev->type == TYPE_DISK) {
|
||||
blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
|
||||
if (err) {
|
||||
@ -2911,6 +2937,7 @@ static Property scsi_hd_properties[] = {
|
||||
DEFAULT_MAX_UNMAP_SIZE),
|
||||
DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
|
||||
DEFAULT_MAX_IO_SIZE),
|
||||
DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
|
||||
DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
@ -2982,6 +3009,7 @@ static const TypeInfo scsi_cd_info = {
|
||||
static Property scsi_block_properties[] = {
|
||||
DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
|
||||
DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
|
||||
DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
|
@ -479,6 +479,7 @@ int generic_symbol_at_address(bfd_vma, struct disassemble_info *);
|
||||
(INFO).buffer_vma = 0, \
|
||||
(INFO).buffer_length = 0, \
|
||||
(INFO).read_memory_func = buffer_read_memory, \
|
||||
(INFO).read_memory_inner_func = NULL, \
|
||||
(INFO).memory_error_func = perror_memory, \
|
||||
(INFO).print_address_func = generic_print_address, \
|
||||
(INFO).print_insn = NULL, \
|
||||
|
@ -508,6 +508,14 @@ struct IDEDevice {
|
||||
char *serial;
|
||||
char *model;
|
||||
uint64_t wwn;
|
||||
/*
|
||||
* 0x0000 - rotation rate not reported
|
||||
* 0x0001 - non-rotating medium (SSD)
|
||||
* 0x0002-0x0400 - reserved
|
||||
* 0x0401-0xfffe - rotations per minute
|
||||
* 0xffff - reserved
|
||||
*/
|
||||
uint16_t rotation_rate;
|
||||
};
|
||||
|
||||
/* These are used for the error_status field of IDEBus */
|
||||
|
@ -153,6 +153,7 @@ struct DeviceState {
|
||||
/*< public >*/
|
||||
|
||||
const char *id;
|
||||
char *canonical_path;
|
||||
bool realized;
|
||||
bool pending_deleted_event;
|
||||
QemuOpts *opts;
|
||||
|
libdecnumber/Makefile.objs | 5 (new file)
@ -0,0 +1,5 @@
|
||||
obj-y += decContext.o
|
||||
obj-y += decNumber.o
|
||||
obj-y += dpd/decimal32.o
|
||||
obj-y += dpd/decimal64.o
|
||||
obj-y += dpd/decimal128.o
|
memory.c | 18
@ -1892,7 +1892,7 @@ void memory_region_notify_one(IOMMUNotifier *notifier,
|
||||
* Skip the notification if the notification does not overlap
|
||||
* with registered range.
|
||||
*/
|
||||
if (notifier->start > entry->iova + entry->addr_mask + 1 ||
|
||||
if (notifier->start > entry->iova + entry->addr_mask ||
|
||||
notifier->end < entry->iova) {
|
||||
return;
|
||||
}
|
||||
@ -2599,20 +2599,14 @@ static void listener_add_address_space(MemoryListener *listener,
|
||||
|
||||
view = address_space_get_flatview(as);
|
||||
FOR_EACH_FLAT_RANGE(fr, view) {
|
||||
MemoryRegionSection section = {
|
||||
.mr = fr->mr,
|
||||
.fv = view,
|
||||
.offset_within_region = fr->offset_in_region,
|
||||
.size = fr->addr.size,
|
||||
.offset_within_address_space = int128_get64(fr->addr.start),
|
||||
.readonly = fr->readonly,
|
||||
};
|
||||
if (fr->dirty_log_mask && listener->log_start) {
|
||||
listener->log_start(listener, &section, 0, fr->dirty_log_mask);
|
||||
}
|
||||
MemoryRegionSection section = section_from_flat_range(fr, view);
|
||||
|
||||
if (listener->region_add) {
|
||||
listener->region_add(listener, &section);
|
||||
}
|
||||
if (fr->dirty_log_mask && listener->log_start) {
|
||||
listener->log_start(listener, &section, 0, fr->dirty_log_mask);
|
||||
}
|
||||
}
|
||||
if (listener->commit) {
|
||||
listener->commit(listener);
|
||||
|
@ -11,6 +11,8 @@ use warnings;
|
||||
my $P = $0;
|
||||
$P =~ s@.*/@@g;
|
||||
|
||||
our $SrcFile = qr{\.(?:h|c|cpp|s|S|pl|py|sh)$};
|
||||
|
||||
my $V = '0.31';
|
||||
|
||||
use Getopt::Long qw(:config no_auto_abbrev);
|
||||
@ -101,30 +103,29 @@ if ($#ARGV < 0) {
|
||||
}
|
||||
|
||||
if (!defined $chk_branch && !defined $chk_patch && !defined $file) {
|
||||
$chk_branch = $ARGV[0] =~ /\.\./ ? 1 : 0;
|
||||
$chk_patch = $chk_branch ? 0 :
|
||||
$ARGV[0] =~ /\.patch$/ || $ARGV[0] eq "-" ? 1 : 0;
|
||||
$file = $chk_branch || $chk_patch ? 0 : 1;
|
||||
$chk_branch = $ARGV[0] =~ /.\.\./ ? 1 : 0;
|
||||
$file = $ARGV[0] =~ /$SrcFile/ ? 1 : 0;
|
||||
$chk_patch = $chk_branch || $file ? 0 : 1;
|
||||
} elsif (!defined $chk_branch && !defined $chk_patch) {
|
||||
if ($file) {
|
||||
$chk_branch = $chk_patch = 0;
|
||||
} else {
|
||||
$chk_branch = $ARGV[0] =~ /\.\./ ? 1 : 0;
|
||||
$chk_branch = $ARGV[0] =~ /.\.\./ ? 1 : 0;
|
||||
$chk_patch = $chk_branch ? 0 : 1;
|
||||
}
|
||||
} elsif (!defined $chk_branch && !defined $file) {
|
||||
if ($chk_patch) {
|
||||
$chk_branch = $file = 0;
|
||||
} else {
|
||||
$chk_branch = $ARGV[0] =~ /\.\./ ? 1 : 0;
|
||||
$chk_branch = $ARGV[0] =~ /.\.\./ ? 1 : 0;
|
||||
$file = $chk_branch ? 0 : 1;
|
||||
}
|
||||
} elsif (!defined $chk_patch && !defined $file) {
|
||||
if ($chk_branch) {
|
||||
$chk_patch = $file = 0;
|
||||
} else {
|
||||
$chk_patch = $ARGV[0] =~ /\.patch$/ || $ARGV[0] eq "-" ? 1 : 0;
|
||||
$file = $chk_patch ? 0 : 1;
|
||||
$file = $ARGV[0] =~ /$SrcFile/ ? 1 : 0;
|
||||
$chk_patch = $file ? 0 : 1;
|
||||
}
|
||||
} elsif (!defined $chk_branch) {
|
||||
$chk_branch = $chk_patch || $file ? 0 : 1;
|
||||
@ -1443,7 +1444,7 @@ sub process {
|
||||
}
|
||||
|
||||
# check we are in a valid source file if not then ignore this hunk
|
||||
next if ($realfile !~ /\.(h|c|cpp|s|S|pl|py|sh)$/);
|
||||
next if ($realfile !~ /$SrcFile/);
|
||||
|
||||
#90 column limit
|
||||
if ($line =~ /^\+/ &&
|
||||
|
@ -276,15 +276,26 @@ static void dm_init(void)
|
||||
|
||||
/* Variables required by libmultipath and libmpathpersist. */
|
||||
QEMU_BUILD_BUG_ON(PR_HELPER_DATA_SIZE > MPATH_MAX_PARAM_LEN);
|
||||
static struct config *multipath_conf;
|
||||
unsigned mpath_mx_alloc_len = PR_HELPER_DATA_SIZE;
|
||||
int logsink;
|
||||
struct udev *udev;
|
||||
|
||||
extern struct config *get_multipath_config(void);
|
||||
struct config *get_multipath_config(void)
|
||||
{
|
||||
return multipath_conf;
|
||||
}
|
||||
|
||||
extern void put_multipath_config(struct config *conf);
|
||||
void put_multipath_config(struct config *conf)
|
||||
{
|
||||
}
|
||||
|
||||
static void multipath_pr_init(void)
|
||||
{
|
||||
static struct udev *udev;
|
||||
|
||||
udev = udev_new();
|
||||
mpath_lib_init(udev);
|
||||
multipath_conf = mpath_lib_init();
|
||||
}
|
||||
|
||||
static int is_mpath(int fd)
|
||||
|
@ -136,6 +136,7 @@ typedef struct DisasContext {
|
||||
int cpuid_ext3_features;
|
||||
int cpuid_7_0_ebx_features;
|
||||
int cpuid_xsave_features;
|
||||
sigjmp_buf jmpbuf;
|
||||
} DisasContext;
|
||||
|
||||
static void gen_eob(DisasContext *s);
|
||||
@ -1863,6 +1864,57 @@ static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
|
||||
}
|
||||
}
|
||||
|
||||
#define X86_MAX_INSN_LENGTH 15
|
||||
|
||||
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
|
||||
{
|
||||
uint64_t pc = s->pc;
|
||||
|
||||
s->pc += num_bytes;
|
||||
if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
|
||||
/* If the instruction's 16th byte is on a different page than the 1st, a
|
||||
* page fault on the second page wins over the general protection fault
|
||||
* caused by the instruction being too long.
|
||||
* This can happen even if the operand is only one byte long!
|
||||
*/
|
||||
if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
|
||||
volatile uint8_t unused =
|
||||
cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
|
||||
(void) unused;
|
||||
}
|
||||
siglongjmp(s->jmpbuf, 1);
|
||||
}
|
||||
|
||||
return pc;
|
||||
}
|
||||
|
||||
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
|
||||
{
|
||||
return cpu_ldub_code(env, advance_pc(env, s, 1));
|
||||
}
|
||||
|
||||
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
|
||||
{
|
||||
return cpu_ldsw_code(env, advance_pc(env, s, 2));
|
||||
}
|
||||
|
||||
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
|
||||
{
|
||||
return cpu_lduw_code(env, advance_pc(env, s, 2));
|
||||
}
|
||||
|
||||
static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
|
||||
{
|
||||
return cpu_ldl_code(env, advance_pc(env, s, 4));
|
||||
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
|
||||
{
|
||||
return cpu_ldq_code(env, advance_pc(env, s, 8));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Decompose an address. */
|
||||
|
||||
typedef struct AddressParts {
|
||||
@ -1900,7 +1952,7 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
|
||||
case MO_32:
|
||||
havesib = 0;
|
||||
if (rm == 4) {
|
||||
int code = cpu_ldub_code(env, s->pc++);
|
||||
int code = x86_ldub_code(env, s);
|
||||
scale = (code >> 6) & 3;
|
||||
index = ((code >> 3) & 7) | REX_X(s);
|
||||
if (index == 4) {
|
||||
@ -1914,8 +1966,7 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
|
||||
case 0:
|
||||
if ((base & 7) == 5) {
|
||||
base = -1;
|
||||
disp = (int32_t)cpu_ldl_code(env, s->pc);
|
||||
s->pc += 4;
|
||||
disp = (int32_t)x86_ldl_code(env, s);
|
||||
if (CODE64(s) && !havesib) {
|
||||
base = -2;
|
||||
disp += s->pc + s->rip_offset;
|
||||
@ -1923,12 +1974,11 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
disp = (int8_t)cpu_ldub_code(env, s->pc++);
|
||||
disp = (int8_t)x86_ldub_code(env, s);
|
||||
break;
|
||||
default:
|
||||
case 2:
|
||||
disp = (int32_t)cpu_ldl_code(env, s->pc);
|
||||
s->pc += 4;
|
||||
disp = (int32_t)x86_ldl_code(env, s);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1945,15 +1995,13 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
|
||||
if (mod == 0) {
|
||||
if (rm == 6) {
|
||||
base = -1;
|
||||
disp = cpu_lduw_code(env, s->pc);
|
||||
s->pc += 2;
|
||||
disp = x86_lduw_code(env, s);
|
||||
break;
|
||||
}
|
||||
} else if (mod == 1) {
|
||||
disp = (int8_t)cpu_ldub_code(env, s->pc++);
|
||||
disp = (int8_t)x86_ldub_code(env, s);
|
||||
} else {
|
||||
disp = (int16_t)cpu_lduw_code(env, s->pc);
|
||||
s->pc += 2;
|
||||
disp = (int16_t)x86_lduw_code(env, s);
|
||||
}
|
||||
|
||||
switch (rm) {
|
||||
@ -2103,19 +2151,16 @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
|
||||
|
||||
switch (ot) {
|
||||
case MO_8:
|
||||
ret = cpu_ldub_code(env, s->pc);
|
||||
s->pc++;
|
||||
ret = x86_ldub_code(env, s);
|
||||
break;
|
||||
case MO_16:
|
||||
ret = cpu_lduw_code(env, s->pc);
|
||||
s->pc += 2;
|
||||
ret = x86_lduw_code(env, s);
|
||||
break;
|
||||
case MO_32:
|
||||
#ifdef TARGET_X86_64
|
||||
case MO_64:
|
||||
#endif
|
||||
ret = cpu_ldl_code(env, s->pc);
|
||||
s->pc += 4;
|
||||
ret = x86_ldl_code(env, s);
|
||||
break;
|
||||
default:
|
||||
tcg_abort();
|
||||
@ -3041,7 +3086,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
gen_helper_enter_mmx(cpu_env);
|
||||
}
|
||||
|
||||
modrm = cpu_ldub_code(env, s->pc++);
|
||||
modrm = x86_ldub_code(env, s);
|
||||
reg = ((modrm >> 3) & 7);
|
||||
if (is_xmm)
|
||||
reg |= rex_r;
|
||||
@ -3250,8 +3295,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
|
||||
if (b1 == 1 && reg != 0)
|
||||
goto illegal_op;
|
||||
field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
|
||||
bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
|
||||
field_length = x86_ldub_code(env, s) & 0x3F;
|
||||
bit_index = x86_ldub_code(env, s) & 0x3F;
|
||||
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
|
||||
offsetof(CPUX86State,xmm_regs[reg]));
|
||||
if (b1 == 1)
|
||||
@ -3380,7 +3425,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
if (b1 >= 2) {
|
||||
goto unknown_op;
|
||||
}
|
||||
val = cpu_ldub_code(env, s->pc++);
|
||||
val = x86_ldub_code(env, s);
|
||||
if (is_xmm) {
|
||||
tcg_gen_movi_tl(cpu_T0, val);
|
||||
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
|
||||
@ -3537,7 +3582,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
case 0x1c4:
|
||||
s->rip_offset = 1;
|
||||
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
||||
val = cpu_ldub_code(env, s->pc++);
|
||||
val = x86_ldub_code(env, s);
|
||||
if (b1) {
|
||||
val &= 7;
|
||||
tcg_gen_st16_tl(cpu_T0, cpu_env,
|
||||
@ -3553,7 +3598,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
if (mod != 3)
|
||||
goto illegal_op;
|
||||
ot = mo_64_32(s->dflag);
|
||||
val = cpu_ldub_code(env, s->pc++);
|
||||
val = x86_ldub_code(env, s);
|
||||
if (b1) {
|
||||
val &= 7;
|
||||
rm = (modrm & 7) | REX_B(s);
|
||||
@ -3616,7 +3661,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
if ((b & 0xf0) == 0xf0) {
|
||||
goto do_0f_38_fx;
|
||||
}
|
||||
modrm = cpu_ldub_code(env, s->pc++);
|
||||
modrm = x86_ldub_code(env, s);
|
||||
rm = modrm & 7;
|
||||
reg = ((modrm >> 3) & 7) | rex_r;
|
||||
mod = (modrm >> 6) & 3;
|
||||
@ -3693,7 +3738,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
do_0f_38_fx:
|
||||
/* Various integer extensions at 0f 38 f[0-f]. */
|
||||
b = modrm | (b1 << 8);
|
||||
modrm = cpu_ldub_code(env, s->pc++);
|
||||
modrm = x86_ldub_code(env, s);
|
||||
reg = ((modrm >> 3) & 7) | rex_r;
|
||||
|
||||
switch (b) {
|
||||
@ -4054,7 +4099,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
case 0x03a:
|
||||
case 0x13a:
|
||||
b = modrm;
|
||||
modrm = cpu_ldub_code(env, s->pc++);
|
||||
modrm = x86_ldub_code(env, s);
|
||||
rm = modrm & 7;
|
||||
reg = ((modrm >> 3) & 7) | rex_r;
|
||||
mod = (modrm >> 6) & 3;
|
||||
@ -4077,7 +4122,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
if (mod != 3)
|
||||
gen_lea_modrm(env, s, modrm);
|
||||
reg = ((modrm >> 3) & 7) | rex_r;
|
||||
val = cpu_ldub_code(env, s->pc++);
|
||||
val = x86_ldub_code(env, s);
|
||||
switch (b) {
|
||||
case 0x14: /* pextrb */
|
||||
tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
|
||||
@ -4225,7 +4270,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
gen_ldq_env_A0(s, op2_offset);
|
||||
}
|
||||
}
|
||||
val = cpu_ldub_code(env, s->pc++);
|
||||
val = x86_ldub_code(env, s);
|
||||
|
||||
if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
|
||||
set_cc_op(s, CC_OP_EFLAGS);
|
||||
@ -4244,7 +4289,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
|
||||
case 0x33a:
|
||||
/* Various integer extensions at 0f 3a f[0-f]. */
|
||||
 b = modrm | (b1 << 8);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;

 switch (b) {
@@ -4256,7 +4301,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
 }
 ot = mo_64_32(s->dflag);
 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
-b = cpu_ldub_code(env, s->pc++);
+b = x86_ldub_code(env, s);
 if (ot == MO_64) {
 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
 } else {
@@ -4351,7 +4396,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
 }
 switch(b) {
 case 0x0f: /* 3DNow! data insns */
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 sse_fn_epp = sse_op_table5[val];
 if (!sse_fn_epp) {
 goto unknown_op;
@@ -4365,7 +4410,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
 break;
 case 0x70: /* pshufx insn */
 case 0xc6: /* pshufx insn */
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
 /* XXX: introduce a new table? */
@@ -4374,7 +4419,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
 break;
 case 0xc2:
 /* compare insns */
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 if (val >= 8)
 goto unknown_op;
 sse_fn_epp = sse_op_table4[val][b1];
@@ -4435,16 +4480,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 s->rip_offset = 0; /* for relative ip address */
 s->vex_l = 0;
 s->vex_v = 0;
-next_byte:
-/* x86 has an upper limit of 15 bytes for an instruction. Since we
-* do not want to decode and generate IR for an illegal
-* instruction, the following check limits the instruction size to
-* 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
-if (s->pc - pc_start > 14) {
-goto illegal_op;
+if (sigsetjmp(s->jmpbuf, 0) != 0) {
+gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+return s->pc;
 }
-b = cpu_ldub_code(env, s->pc);
-s->pc++;
+
+next_byte:
+b = x86_ldub_code(env, s);
 /* Collect prefixes. */
 switch (b) {
 case 0xf3:
@@ -4501,7 +4543,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 static const int pp_prefix[4] = {
 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
 };
-int vex3, vex2 = cpu_ldub_code(env, s->pc);
+int vex3, vex2 = x86_ldub_code(env, s);

 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
@@ -4523,17 +4565,17 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 rex_r = (~vex2 >> 4) & 8;
 if (b == 0xc5) {
 vex3 = vex2;
-b = cpu_ldub_code(env, s->pc++);
+b = x86_ldub_code(env, s);
 } else {
 #ifdef TARGET_X86_64
 s->rex_x = (~vex2 >> 3) & 8;
 s->rex_b = (~vex2 >> 2) & 8;
 #endif
-vex3 = cpu_ldub_code(env, s->pc++);
+vex3 = x86_ldub_code(env, s);
 rex_w = (vex3 >> 7) & 1;
 switch (vex2 & 0x1f) {
 case 0x01: /* Implied 0f leading opcode bytes. */
-b = cpu_ldub_code(env, s->pc++) | 0x100;
+b = x86_ldub_code(env, s) | 0x100;
 break;
 case 0x02: /* Implied 0f 38 leading opcode bytes. */
 b = 0x138;
@@ -4585,7 +4627,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x0f:
 /**************************/
 /* extended op code */
-b = cpu_ldub_code(env, s->pc++) | 0x100;
+b = x86_ldub_code(env, s) | 0x100;
 goto reswitch;

 /**************************/
@@ -4607,7 +4649,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)

 switch(f) {
 case 0: /* OP Ev, Gv */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
@@ -4628,7 +4670,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_op(s, op, ot, opreg);
 break;
 case 1: /* OP Gv, Ev */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 reg = ((modrm >> 3) & 7) | rex_r;
 rm = (modrm & 7) | REX_B(s);
@@ -4662,7 +4704,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)

 ot = mo_b_d(b, dflag);

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
 op = (modrm >> 3) & 7;
@@ -4708,7 +4750,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xf7:
 ot = mo_b_d(b, dflag);

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
 op = (modrm >> 3) & 7;
@@ -4940,7 +4982,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xff: /* GRP5 */
 ot = mo_b_d(b, dflag);

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
 op = (modrm >> 3) & 7;
@@ -5048,7 +5090,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x85:
 ot = mo_b_d(b, dflag);

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;

 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
@@ -5120,7 +5162,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x69: /* imul Gv, Ev, I */
 case 0x6b:
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 if (b == 0x69)
 s->rip_offset = insn_const_size(ot);
@@ -5172,7 +5214,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x1c0:
 case 0x1c1: /* xadd Ev, Gv */
 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 gen_op_mov_v_reg(ot, cpu_T0, reg);
@@ -5204,7 +5246,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 TCGv oldv, newv, cmpv;

 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 oldv = tcg_temp_new();
@@ -5256,7 +5298,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 }
 break;
 case 0x1c7: /* cmpxchg8b */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 if ((mod == 3) || ((modrm & 0x38) != 0x8))
 goto illegal_op;
@@ -5318,7 +5360,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_push_v(s, cpu_T0);
 break;
 case 0x8f: /* pop Ev */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 ot = gen_pop_T0(s);
 if (mod == 3) {
@@ -5337,9 +5379,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xc8: /* enter */
 {
 int level;
-val = cpu_lduw_code(env, s->pc);
-s->pc += 2;
-level = cpu_ldub_code(env, s->pc++);
+val = x86_lduw_code(env, s);
+level = x86_ldub_code(env, s);
 gen_enter(s, val, level);
 }
 break;
@@ -5396,7 +5437,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x88:
 case 0x89: /* mov Gv, Ev */
 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;

 /* generate a generic store */
@@ -5405,7 +5446,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xc6:
 case 0xc7: /* mov Ev, Iv */
 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 if (mod != 3) {
 s->rip_offset = insn_const_size(ot);
@@ -5422,14 +5463,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x8a:
 case 0x8b: /* mov Ev, Gv */
 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;

 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
 gen_op_mov_reg_v(ot, reg, cpu_T0);
 break;
 case 0x8e: /* mov seg, Gv */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = (modrm >> 3) & 7;
 if (reg >= 6 || reg == R_CS)
 goto illegal_op;
@@ -5447,7 +5488,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 }
 break;
 case 0x8c: /* mov Gv, seg */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = (modrm >> 3) & 7;
 mod = (modrm >> 6) & 3;
 if (reg >= 6)
@@ -5472,7 +5513,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 /* s_ot is the sign+size of source */
 s_ot = b & 8 ? MO_SIGN | ot : ot;

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
@@ -5508,7 +5549,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 break;

 case 0x8d: /* lea */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 if (mod == 3)
 goto illegal_op;
@@ -5532,8 +5573,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 switch (s->aflag) {
 #ifdef TARGET_X86_64
 case MO_64:
-offset_addr = cpu_ldq_code(env, s->pc);
-s->pc += 8;
+offset_addr = x86_ldq_code(env, s);
 break;
 #endif
 default:
@@ -5570,8 +5610,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (dflag == MO_64) {
 uint64_t tmp;
 /* 64 bit case */
-tmp = cpu_ldq_code(env, s->pc);
-s->pc += 8;
+tmp = x86_ldq_code(env, s);
 reg = (b & 7) | REX_B(s);
 tcg_gen_movi_tl(cpu_T0, tmp);
 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
@@ -5595,7 +5634,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x86:
 case 0x87: /* xchg Ev, Gv */
 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 if (mod == 3) {
@@ -5632,7 +5671,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 op = R_GS;
 do_lxx:
 ot = dflag != MO_16 ? MO_32 : MO_16;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 if (mod == 3)
@@ -5660,7 +5699,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 grp2:
 {
 ot = mo_b_d(b, dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 op = (modrm >> 3) & 7;

@@ -5679,7 +5718,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_shift(s, op, ot, opreg, OR_ECX);
 } else {
 if (shift == 2) {
-shift = cpu_ldub_code(env, s->pc++);
+shift = x86_ldub_code(env, s);
 }
 gen_shifti(s, op, ot, opreg, shift);
 }
@@ -5713,7 +5752,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 shift = 0;
 do_shiftd:
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
 reg = ((modrm >> 3) & 7) | rex_r;
@@ -5726,7 +5765,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_op_mov_v_reg(ot, cpu_T1, reg);

 if (shift) {
-TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
+TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
 tcg_temp_free(imm);
 } else {
@@ -5743,7 +5782,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
 break;
 }
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 rm = modrm & 7;
 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
@@ -6328,7 +6367,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xe4:
 case 0xe5:
 ot = mo_b_d32(b, dflag);
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 tcg_gen_movi_tl(cpu_T0, val);
 gen_check_io(s, ot, pc_start - s->cs_base,
 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
@@ -6347,7 +6386,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xe6:
 case 0xe7:
 ot = mo_b_d32(b, dflag);
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 tcg_gen_movi_tl(cpu_T0, val);
 gen_check_io(s, ot, pc_start - s->cs_base,
 svm_is_rep(prefixes));
@@ -6407,8 +6446,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 /************************/
 /* control */
 case 0xc2: /* ret im */
-val = cpu_ldsw_code(env, s->pc);
-s->pc += 2;
+val = x86_ldsw_code(env, s);
 ot = gen_pop_T0(s);
 gen_stack_update(s, val + (1 << ot));
 /* Note that gen_pop_T0 uses a zero-extending load. */
@@ -6425,8 +6463,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_jr(s, cpu_T0);
 break;
 case 0xca: /* lret im */
-val = cpu_ldsw_code(env, s->pc);
-s->pc += 2;
+val = x86_ldsw_code(env, s);
 do_lret:
 if (s->pe && !s->vm86) {
 gen_update_cc_op(s);
@@ -6563,7 +6600,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 break;

 case 0x190 ... 0x19f: /* setcc Gv */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 gen_setcc1(s, b, cpu_T0);
 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
 break;
@@ -6572,7 +6609,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 goto illegal_op;
 }
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 gen_cmovcc1(env, s, ot, b, modrm, reg);
 break;
@@ -6689,7 +6726,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 /* bit operations */
 case 0x1ba: /* bt/bts/btr/btc Gv, im */
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 op = (modrm >> 3) & 7;
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
@@ -6703,7 +6740,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_op_mov_v_reg(ot, cpu_T0, rm);
 }
 /* load shift */
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 tcg_gen_movi_tl(cpu_T1, val);
 if (op < 4)
 goto unknown_op;
@@ -6722,7 +6759,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 op = 3;
 do_btx:
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
@@ -6827,7 +6864,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0x1bc: /* bsf / tzcnt */
 case 0x1bd: /* bsr / lzcnt */
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
 gen_extu(ot, cpu_T0);
@@ -6907,7 +6944,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xd4: /* aam */
 if (CODE64(s))
 goto illegal_op;
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 if (val == 0) {
 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
 } else {
@@ -6918,7 +6955,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 case 0xd5: /* aad */
 if (CODE64(s))
 goto illegal_op;
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 gen_helper_aad(cpu_env, tcg_const_i32(val));
 set_cc_op(s, CC_OP_LOGICB);
 break;
@@ -6952,7 +6989,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
 break;
 case 0xcd: /* int N */
-val = cpu_ldub_code(env, s->pc++);
+val = x86_ldub_code(env, s);
 if (s->vm86 && s->iopl != 3) {
 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
 } else {
@@ -7007,7 +7044,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (CODE64(s))
 goto illegal_op;
 ot = dflag;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = (modrm >> 3) & 7;
 mod = (modrm >> 6) & 3;
 if (mod == 3)
@@ -7186,7 +7223,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 }
 break;
 case 0x100:
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 op = (modrm >> 3) & 7;
 switch(op) {
@@ -7251,7 +7288,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 break;

 case 0x101:
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 switch (modrm) {
 CASE_MODRM_MEM_OP(0): /* sgdt */
 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
@@ -7596,7 +7633,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 /* d_ot is the size of destination */
 d_ot = dflag;

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 mod = (modrm >> 6) & 3;
 rm = (modrm & 7) | REX_B(s);
@@ -7625,7 +7662,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 t1 = tcg_temp_local_new();
 t2 = tcg_temp_local_new();
 ot = MO_16;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = (modrm >> 3) & 7;
 mod = (modrm >> 6) & 3;
 rm = modrm & 7;
@@ -7670,7 +7707,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (!s->pe || s->vm86)
 goto illegal_op;
 ot = dflag != MO_16 ? MO_32 : MO_16;
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;
 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
 t0 = tcg_temp_local_new();
@@ -7690,7 +7727,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 }
 break;
 case 0x118:
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 op = (modrm >> 3) & 7;
 switch(op) {
@@ -7709,7 +7746,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 }
 break;
 case 0x11a:
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 if (s->flags & HF_MPX_EN_MASK) {
 mod = (modrm >> 6) & 3;
 reg = ((modrm >> 3) & 7) | rex_r;
@@ -7799,7 +7836,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_nop_modrm(env, s, modrm);
 break;
 case 0x11b:
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 if (s->flags & HF_MPX_EN_MASK) {
 mod = (modrm >> 6) & 3;
 reg = ((modrm >> 3) & 7) | rex_r;
@@ -7901,7 +7938,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_nop_modrm(env, s, modrm);
 break;
 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 gen_nop_modrm(env, s, modrm);
 break;
 case 0x120: /* mov reg, crN */
@@ -7909,7 +7946,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (s->cpl != 0) {
 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
 } else {
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
 * AMD documentation (24594.pdf) and testing of
 * intel 386 and 486 processors all show that the mod bits
@@ -7966,7 +8003,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (s->cpl != 0) {
 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
 } else {
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
 * AMD documentation (24594.pdf) and testing of
 * intel 386 and 486 processors all show that the mod bits
@@ -8012,7 +8049,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (!(s->cpuid_features & CPUID_SSE2))
 goto illegal_op;
 ot = mo_64_32(dflag);
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 if (mod == 3)
 goto illegal_op;
@@ -8021,7 +8058,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
 break;
 case 0x1ae:
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 switch (modrm) {
 CASE_MODRM_MEM_OP(0): /* fxsave */
 if (!(s->cpuid_features & CPUID_FXSR)
@@ -8219,7 +8256,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 break;

 case 0x10d: /* 3DNow! prefetch(w) */
-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 mod = (modrm >> 6) & 3;
 if (mod == 3)
 goto illegal_op;
@@ -8241,7 +8278,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
 goto illegal_op;

-modrm = cpu_ldub_code(env, s->pc++);
+modrm = x86_ldub_code(env, s);
 reg = ((modrm >> 3) & 7) | rex_r;

 if (s->prefix & PREFIX_DATA) {
@@ -948,6 +948,7 @@ void nios2_tcg_init(void)
 int i;

 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+tcg_ctx.tcg_env = cpu_env;

 for (i = 0; i < NUM_CORE_REGS; i++) {
 cpu_R[i] = tcg_global_mem_new(cpu_env,
@@ -15,5 +15,6 @@ obj-y += int_helper.o
 obj-y += timebase_helper.o
 obj-y += misc_helper.o
 obj-y += mem_helper.o
+obj-y += ../../libdecnumber/
 obj-$(CONFIG_USER_ONLY) += user_only_helper.o
 obj-y += gdbstub.o
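The target/i386 hunks above are almost a mechanical substitution: every cpu_ld*_code(env, s->pc...) fetch becomes x86_ld*_code(env, s), and the old explicit length check at the top of disas_insn is replaced by a sigsetjmp() guard that the new fetch helpers can unwind to once an instruction grows past the architectural 15-byte limit. The helpers themselves are defined elsewhere in this series and are not shown on this page; the following self-contained C sketch only illustrates the pattern, and DisasContext, ldub_code and the buffer set-up here are illustrative stand-ins, not the QEMU definitions.

/* Minimal sketch of the bounded-fetch pattern; names mirror QEMU but this is
 * not the actual implementation. */
#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define X86_MAX_INSN_LENGTH 15

typedef struct DisasContext {
    const uint8_t *code;   /* stand-in for guest code read via cpu_ldub_code() */
    size_t pc;             /* current fetch offset */
    size_t pc_start;       /* offset where the current instruction began */
    sigjmp_buf jmpbuf;     /* unwind target installed by the decoder loop */
} DisasContext;

/* Fetch one code byte; abandon decoding once the instruction exceeds 15 bytes. */
static uint8_t ldub_code(DisasContext *s)
{
    if (s->pc - s->pc_start >= X86_MAX_INSN_LENGTH) {
        siglongjmp(s->jmpbuf, 1);          /* jumps back to sigsetjmp() below */
    }
    return s->code[s->pc++];
}

int main(void)
{
    uint8_t too_long[32];
    DisasContext s = { .code = too_long, .pc = 0, .pc_start = 0 };

    memset(too_long, 0x66, sizeof(too_long));   /* 32 prefix bytes: never a valid insn */

    if (sigsetjmp(s.jmpbuf, 0) != 0) {
        /* The real decoder raises #GP here via gen_exception(). */
        printf("instruction longer than %d bytes, raising #GP\n", X86_MAX_INSN_LENGTH);
        return 0;
    }
    for (;;) {
        (void)ldub_code(&s);               /* keeps "decoding" prefixes until the limit trips */
    }
}

The longjmp-style unwind is what keeps every call site a single expression; without it, each of the dozens of fetch sites touched in the diff would have to check for overflow and propagate an error return by hand.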