Merge tag 'pull-request-2022-07-05' of https://gitlab.com/thuth/qemu into staging

* Fix memory leak in test-cutils
* Fix edk2/opensbi jobs to not run automatically by accident
* Improve timings in the migration qtest
* Remove libvixl disassembler
* Add ukrainian translation
* Require a recent version of libpng

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEEJ7iIR+7gJQEY8+q5LtnXdP5wLbUFAmLECEkRHHRodXRoQHJl
# ZGhhdC5jb20ACgkQLtnXdP5wLbV7lxAAmEItM6PIoW58eWPzReKVH8LE2w3UlvOZ
# JQhNgJjuN23fqjUVkcT0yCfdNCz/nKvafHnxfHQnrAXyB5V5vU8ovBgSuWK2mcmD
# NTFK+/2x5lcsyBrOe3QoeD2g1r7+Os3AYVkdnN/t2HAMLwQyaoshKaMV/UHC9O/i
# Kle1svYRNyCgyXJgxaOdbVMBSLi/L9h2R5AaG31GIi9wnf0n8HDH/ONtmeIpN09g
# BlMeZqPhGJT+tpMvviif65/Za57Y9h/r+TOgEIIs00cWmxqaBmcXXN9qog2s0n7A
# nOm3ck2lpGJCQ6+sl6/Mphyr3X6nWHsxGrLDElS0Ba5bg6T/Xqfg2pBcb81Klkjc
# QcTdFPiMxKUczgpFq326sqiaVzMgys4vwnW5iPSd5swNzrkYKADAIreki5jyM3cH
# lohBG/ruOmg5xMkX2K6pra0iOAeCz44Ku/HTREfY1CTUgEQZJY4SZrMJSnmUTnM+
# EQCkDcmOsnFDaQazneCbo18l37cXOgEhH8VoGAOqg1aRjr7TNlsJzx87PoD+9zNR
# GEh7kp18ABRGik5ZACdLQ/HhhOJa8+UWsGCwCdeBGv/TVug1Byz0OUG0PxX3X5SV
# WwubeKyZcqzoH92SQI3jZGSmuGBySy9q51T2k8FjZvaDsPiUN/MLPspNezH1qj2B
# W7qEaqIyGmo=
# =Q2vV
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 05 Jul 2022 03:15:45 PM +0530
# gpg:                using RSA key 27B88847EEE0250118F3EAB92ED9D774FE702DB5
# gpg:                issuer "thuth@redhat.com"
# gpg: Good signature from "Thomas Huth <th.huth@gmx.de>" [undefined]
# gpg:                 aka "Thomas Huth <thuth@redhat.com>" [undefined]
# gpg:                 aka "Thomas Huth <th.huth@posteo.de>" [unknown]
# gpg:                 aka "Thomas Huth <huth@tuxfamily.org>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 27B8 8847 EEE0 2501 18F3 EAB9 2ED9 D774 FE70 2DB5

* tag 'pull-request-2022-07-05' of https://gitlab.com/thuth/qemu:
  include/qemu/host-utils: Remove unused code in the *_overflow wrappers
  meson.build: Require a recent version of libpng
  po: add ukrainian translation
  disas: Remove libvixl disassembler
  tests: use consistent bandwidth/downtime limits in migration tests
  tests: increase migration test converge downtime to 30 seconds
  tests: wait for migration completion before looking for STOP event
  tests: wait max 120 seconds for migration test status changes
  gitlab-ci: Extend timeout for ubuntu-20.04-s390x-all to 75m
  gitlab: honour QEMU_CI variable in edk2/opensbi jobs
  gitlab: tweak comments in edk2/opensbi jobs
  gitlab: normalize indentation in edk2/opensbi rules
  tests/fp: Do not build softfloat3 tests if TCG is disabled
  tests: fix test-cutils leaks

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit d82423a697
@@ -31,6 +31,7 @@ ubuntu-20.04-s390x-all:
  - s390x
  variables:
   DFLTCC: 0
+ timeout: 75m
  rules:
  - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
  - if: "$S390X_RUNNER_AVAILABLE"
.gitlab-ci.d/edk2.yml
@@ -1,60 +1,85 @@
 # All jobs needing docker-edk2 must use the same rules it uses.
 .edk2_job_rules:
-  rules: # Only run this job when ...
-    - changes:
-        # this file is modified
-        - .gitlab-ci.d/edk2.yml
-        # or the Dockerfile is modified
-        - .gitlab-ci.d/edk2/Dockerfile
-        # or roms/edk2/ is modified (submodule updated)
-        - roms/edk2/*
-      when: on_success
-    - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' # or the branch/tag starts with 'edk2'
-      when: on_success
-    - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' # or last commit description contains 'EDK2'
-      when: on_success
+  rules:
+    # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
+    - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+      when: never
+
+    # In forks, if QEMU_CI=1 is set, then create manual job
+    # if any of the files affecting the build are touched
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+      changes:
+        - .gitlab-ci.d/edk2.yml
+        - .gitlab-ci.d/edk2/Dockerfile
+        - roms/edk2/*
+      when: manual
+
+    # In forks, if QEMU_CI=1 is set, then create manual job
+    # if the branch/tag starts with 'edk2'
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/'
+      when: manual
+
+    # In forks, if QEMU_CI=1 is set, then create manual job
+    # if last commit msg contains 'EDK2' (case insensitive)
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i'
+      when: manual
+
+    # Run if any files affecting the build output are touched
+    - changes:
+        - .gitlab-ci.d/edk2.yml
+        - .gitlab-ci.d/edk2/Dockerfile
+        - roms/edk2/*
+      when: on_success
+
+    # Run if the branch/tag starts with 'edk2'
+    - if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
+      when: on_success
+
+    # Run if last commit msg contains 'EDK2' (case insensitive)
+    - if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
+      when: on_success

 docker-edk2:
   extends: .edk2_job_rules
   stage: containers
   image: docker:19.03.1
   services:
   - docker:19.03.1-dind
   variables:
     GIT_DEPTH: 3
     IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build
     # We don't use TLS
     DOCKER_HOST: tcp://docker:2375
     DOCKER_TLS_CERTDIR: ""
   before_script:
   - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
   script:
   - docker pull $IMAGE_TAG || true
   - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
       --tag $IMAGE_TAG .gitlab-ci.d/edk2
   - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
   - docker push $IMAGE_TAG

 build-edk2:
   extends: .edk2_job_rules
   stage: build
   needs: ['docker-edk2']
   artifacts:
     paths: # 'artifacts.zip' will contains the following files:
     - pc-bios/edk2*bz2
     - pc-bios/edk2-licenses.txt
     - edk2-stdout.log
     - edk2-stderr.log
   image: $CI_REGISTRY_IMAGE:edk2-cross-build
   variables:
     GIT_DEPTH: 3
   script: # Clone the required submodules and build EDK2
   - git submodule update --init roms/edk2
   - git -C roms/edk2 submodule update --init --
       ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
       BaseTools/Source/C/BrotliCompress/brotli
       CryptoPkg/Library/OpensslLib/openssl
       MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
   - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
   - echo "=== Using ${JOBS} simultaneous jobs ==="
   - make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2
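A note on the docker-edk2 job above: pulling the previously pushed image and handing it to 'docker build --cache-from' lets a fresh CI runner reuse the registry copy as its layer cache. A minimal standalone sketch of the same pattern, with a hypothetical image name (QEMU's jobs derive theirs from $CI_REGISTRY_IMAGE):

#!/bin/sh
# Hypothetical tag, not part of the commit above.
IMAGE_TAG=registry.example.com/qemu/edk2-cross-build
# On a fresh runner the first pull fails; '|| true' keeps the job going.
docker pull "$IMAGE_TAG" || true
# Reuse layers from the pulled image, then retag and publish the result.
docker build --cache-from "$IMAGE_TAG" --tag "$IMAGE_TAG" .
docker push "$IMAGE_TAG"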
.gitlab-ci.d/opensbi.yml
@@ -1,61 +1,85 @@
 # All jobs needing docker-opensbi must use the same rules it uses.
 .opensbi_job_rules:
-  rules: # Only run this job when ...
-    - changes:
-        # this file is modified
-        - .gitlab-ci.d/opensbi.yml
-        # or the Dockerfile is modified
-        - .gitlab-ci.d/opensbi/Dockerfile
-      when: on_success
-    - changes: # or roms/opensbi/ is modified (submodule updated)
-        - roms/opensbi/*
-      when: on_success
-    - if: '$CI_COMMIT_REF_NAME =~ /^opensbi/' # or the branch/tag starts with 'opensbi'
-      when: on_success
-    - if: '$CI_COMMIT_MESSAGE =~ /opensbi/i' # or last commit description contains 'OpenSBI'
-      when: on_success
+  rules:
+    # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
+    - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+      when: never
+
+    # In forks, if QEMU_CI=1 is set, then create manual job
+    # if any files affecting the build output are touched
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+      changes:
+        - .gitlab-ci.d/opensbi.yml
+        - .gitlab-ci.d/opensbi/Dockerfile
+        - roms/opensbi/*
+      when: manual
+
+    # In forks, if QEMU_CI=1 is set, then create manual job
+    # if the branch/tag starts with 'opensbi'
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^opensbi/'
+      when: manual
+
+    # In forks, if QEMU_CI=1 is set, then create manual job
+    # if the last commit msg contains 'OpenSBI' (case insensitive)
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
+      when: manual
+
+    # Run if any files affecting the build output are touched
+    - changes:
+        - .gitlab-ci.d/opensbi.yml
+        - .gitlab-ci.d/opensbi/Dockerfile
+        - roms/opensbi/*
+      when: on_success
+
+    # Run if the branch/tag starts with 'opensbi'
+    - if: '$CI_COMMIT_REF_NAME =~ /^opensbi/'
+      when: on_success
+
+    # Run if the last commit msg contains 'OpenSBI' (case insensitive)
+    - if: '$CI_COMMIT_MESSAGE =~ /opensbi/i'
+      when: on_success

 docker-opensbi:
   extends: .opensbi_job_rules
   stage: containers
   image: docker:19.03.1
   services:
   - docker:19.03.1-dind
   variables:
     GIT_DEPTH: 3
     IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
     # We don't use TLS
     DOCKER_HOST: tcp://docker:2375
     DOCKER_TLS_CERTDIR: ""
   before_script:
   - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
   script:
   - docker pull $IMAGE_TAG || true
   - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
       --tag $IMAGE_TAG .gitlab-ci.d/opensbi
   - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
   - docker push $IMAGE_TAG

 build-opensbi:
   extends: .opensbi_job_rules
   stage: build
   needs: ['docker-opensbi']
   artifacts:
     paths: # 'artifacts.zip' will contains the following files:
     - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
     - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
     - opensbi32-generic-stdout.log
     - opensbi32-generic-stderr.log
     - opensbi64-generic-stdout.log
     - opensbi64-generic-stderr.log
   image: $CI_REGISTRY_IMAGE:opensbi-cross-build
   variables:
     GIT_DEPTH: 3
   script: # Clone the required submodules and build OpenSBI
   - git submodule update --init roms/opensbi
   - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
   - echo "=== Using ${JOBS} simultaneous jobs ==="
   - make -j${JOBS} -C roms/opensbi clean
   - make -j${JOBS} -C roms opensbi32-generic 2>&1 1>opensbi32-generic-stdout.log | tee -a opensbi32-generic-stderr.log >&2
   - make -j${JOBS} -C roms/opensbi clean
   - make -j${JOBS} -C roms opensbi64-generic 2>&1 1>opensbi64-generic-stdout.log | tee -a opensbi64-generic-stderr.log >&2
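A note on the redirection in the build scripts above ('2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2'): redirections bind left to right, so '2>&1' first points stderr at the pipe (where stdout goes at that moment), and only then does '1>file' send stdout to the log. The pipe therefore carries stderr alone; tee appends it to the error log and passes it back to the job's own stderr, while stdout lands only in the file. A minimal sketch to try the idiom, with hypothetical file names:

#!/bin/sh
# 'out' ends up only in stdout.log; 'err' is appended to stderr.log and
# still reaches the terminal's stderr, because the redirections are
# applied left to right before fd 1 is re-pointed at the file.
emit() { echo out; echo err >&2; }
emit 2>&1 1>stdout.log | tee -a stderr.log >&2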
MAINTAINERS
@@ -165,8 +165,6 @@ F: tests/qtest/arm-cpu-features.c
 F: hw/arm/
 F: hw/cpu/a*mpcore.c
 F: include/hw/cpu/a*mpcore.h
-F: disas/arm-a64.cc
-F: disas/libvixl/
 F: docs/system/target-arm.rst
 F: docs/system/arm/cpu-features.rst

@@ -3313,8 +3311,6 @@ M: Richard Henderson <richard.henderson@linaro.org>
 S: Maintained
 L: qemu-arm@nongnu.org
 F: tcg/aarch64/
-F: disas/arm-a64.cc
-F: disas/libvixl/

 ARM TCG target
 M: Richard Henderson <richard.henderson@linaro.org>
disas.c
@@ -178,9 +178,6 @@ static void initialize_debug_host(CPUDebug *s)
 #endif
 #elif defined(__aarch64__)
     s->info.cap_arch = CS_ARCH_ARM64;
-# ifdef CONFIG_ARM_A64_DIS
-    s->info.print_insn = print_insn_arm_a64;
-# endif
 #elif defined(__alpha__)
     s->info.print_insn = print_insn_alpha;
 #elif defined(__sparc__)
disas/arm-a64.cc
@@ -1,101 +0,0 @@
/*
 * ARM A64 disassembly output wrapper to libvixl
 * Copyright (c) 2013 Linaro Limited
 * Written by Claudio Fontana
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "disas/dis-asm.h"

#include "vixl/a64/disasm-a64.h"

using namespace vixl;

static Decoder *vixl_decoder = NULL;
static Disassembler *vixl_disasm = NULL;

/* We don't use libvixl's PrintDisassembler because its output
 * is a little unhelpful (trailing newlines, for example).
 * Instead we use our own very similar variant so we have
 * control over the format.
 */
class QEMUDisassembler : public Disassembler {
public:
    QEMUDisassembler() : printf_(NULL), stream_(NULL) { }
    ~QEMUDisassembler() { }

    void SetStream(FILE *stream) {
        stream_ = stream;
    }

    void SetPrintf(fprintf_function printf_fn) {
        printf_ = printf_fn;
    }

protected:
    virtual void ProcessOutput(const Instruction *instr) {
        printf_(stream_, "%08" PRIx32 " %s",
                instr->InstructionBits(), GetOutput());
    }

private:
    fprintf_function printf_;
    FILE *stream_;
};

static int vixl_is_initialized(void)
{
    return vixl_decoder != NULL;
}

static void vixl_init() {
    vixl_decoder = new Decoder();
    vixl_disasm = new QEMUDisassembler();
    vixl_decoder->AppendVisitor(vixl_disasm);
}

#define INSN_SIZE 4

/* Disassemble ARM A64 instruction. This is our only entry
 * point from QEMU's C code.
 */
int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
{
    uint8_t bytes[INSN_SIZE];
    uint32_t instrval;
    const Instruction *instr;
    int status;

    status = info->read_memory_func(addr, bytes, INSN_SIZE, info);
    if (status != 0) {
        info->memory_error_func(status, addr, info);
        return -1;
    }

    if (!vixl_is_initialized()) {
        vixl_init();
    }

    ((QEMUDisassembler *)vixl_disasm)->SetPrintf(info->fprintf_func);
    ((QEMUDisassembler *)vixl_disasm)->SetStream(info->stream);

    instrval = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
    instr = reinterpret_cast<const Instruction *>(&instrval);
    vixl_disasm->MapCodeAddress(addr, instr);
    vixl_decoder->Decode(instr);

    return INSN_SIZE;
}
disas/libvixl/LICENCE
@@ -1,30 +0,0 @@
LICENCE
=======

The software in this repository is covered by the following licence.

// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
disas/libvixl/README
@@ -1,11 +0,0 @@

The code in this directory is a subset of libvixl:
 https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only,
taken from libvixl 1.12).
Bugfixes should preferably be sent upstream initially.

The disassembler does not currently support the entire A64 instruction
set. Notably:
 * Limited support for system instructions.
 * A few miscellaneous integer and floating point instructions are missing.
disas/libvixl/meson.build
@@ -1,7 +0,0 @@
libvixl_ss.add(files(
  'vixl/a64/decoder-a64.cc',
  'vixl/a64/disasm-a64.cc',
  'vixl/a64/instructions-a64.cc',
  'vixl/compiler-intrinsics.cc',
  'vixl/utils.cc',
))
File diff suppressed because it is too large
File diff suppressed because it is too large
disas/libvixl/vixl/a64/cpu-a64.h
@@ -1,83 +0,0 @@
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_CPU_A64_H
#define VIXL_CPU_A64_H

#include "vixl/globals.h"
#include "vixl/a64/instructions-a64.h"

namespace vixl {

class CPU {
 public:
  // Initialise CPU support.
  static void SetUp();

  // Ensures the data at a given address and with a given size is the same for
  // the I and D caches. I and D caches are not automatically coherent on ARM
  // so this operation is required before any dynamically generated code can
  // safely run.
  static void EnsureIAndDCacheCoherency(void *address, size_t length);

  // Handle tagged pointers.
  template <typename T>
  static T SetPointerTag(T pointer, uint64_t tag) {
    VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));

    // Use C-style casts to get static_cast behaviour for integral types (T),
    // and reinterpret_cast behaviour for other types.

    uint64_t raw = (uint64_t)pointer;
    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));

    raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
    return (T)raw;
  }

  template <typename T>
  static uint64_t GetPointerTag(T pointer) {
    // Use C-style casts to get static_cast behaviour for integral types (T),
    // and reinterpret_cast behaviour for other types.

    uint64_t raw = (uint64_t)pointer;
    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));

    return (raw & kAddressTagMask) >> kAddressTagOffset;
  }

 private:
  // Return the content of the cache type register.
  static uint32_t GetCacheType();

  // I and D cache line size in bytes.
  static unsigned icache_line_size_;
  static unsigned dcache_line_size_;
};

}  // namespace vixl

#endif  // VIXL_CPU_A64_H
disas/libvixl/vixl/a64/decoder-a64.cc
@@ -1,877 +0,0 @@
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "vixl/globals.h"
#include "vixl/utils.h"
#include "vixl/a64/decoder-a64.h"

namespace vixl {

void Decoder::DecodeInstruction(const Instruction *instr) {
  if (instr->Bits(28, 27) == 0) {
    VisitUnallocated(instr);
  } else {
    switch (instr->Bits(27, 24)) {
      // 0:   PC relative addressing.
      case 0x0: DecodePCRelAddressing(instr); break;

      // 1:   Add/sub immediate.
      case 0x1: DecodeAddSubImmediate(instr); break;

      // A:   Logical shifted register.
      //      Add/sub with carry.
      //      Conditional compare register.
      //      Conditional compare immediate.
      //      Conditional select.
      //      Data processing 1 source.
      //      Data processing 2 source.
      // B:   Add/sub shifted register.
      //      Add/sub extended register.
      //      Data processing 3 source.
      case 0xA:
      case 0xB: DecodeDataProcessing(instr); break;

      // 2:   Logical immediate.
      //      Move wide immediate.
      case 0x2: DecodeLogical(instr); break;

      // 3:   Bitfield.
      //      Extract.
      case 0x3: DecodeBitfieldExtract(instr); break;

      // 4:   Unconditional branch immediate.
      //      Exception generation.
      //      Compare and branch immediate.
      // 5:   Compare and branch immediate.
      //      Conditional branch.
      //      System.
      // 6,7: Unconditional branch.
      //      Test and branch immediate.
      case 0x4:
      case 0x5:
      case 0x6:
      case 0x7: DecodeBranchSystemException(instr); break;

      // 8,9: Load/store register pair post-index.
      //      Load register literal.
      //      Load/store register unscaled immediate.
      //      Load/store register immediate post-index.
      //      Load/store register immediate pre-index.
      //      Load/store register offset.
      //      Load/store exclusive.
      // C,D: Load/store register pair offset.
      //      Load/store register pair pre-index.
      //      Load/store register unsigned immediate.
      //      Advanced SIMD.
      case 0x8:
      case 0x9:
      case 0xC:
      case 0xD: DecodeLoadStore(instr); break;

      // E:   FP fixed point conversion.
      //      FP integer conversion.
      //      FP data processing 1 source.
      //      FP compare.
      //      FP immediate.
      //      FP data processing 2 source.
      //      FP conditional compare.
      //      FP conditional select.
      //      Advanced SIMD.
      // F:   FP data processing 3 source.
      //      Advanced SIMD.
      case 0xE:
      case 0xF: DecodeFP(instr); break;
    }
  }
}

void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
  visitors_.push_back(new_visitor);
}


void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
  visitors_.push_front(new_visitor);
}


void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
                                  DecoderVisitor* registered_visitor) {
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    if (*it == registered_visitor) {
      visitors_.insert(it, new_visitor);
      return;
    }
  }
  // We reached the end of the list. The last element must be
  // registered_visitor.
  VIXL_ASSERT(*it == registered_visitor);
  visitors_.insert(it, new_visitor);
}


void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
                                 DecoderVisitor* registered_visitor) {
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    if (*it == registered_visitor) {
      it++;
      visitors_.insert(it, new_visitor);
      return;
    }
  }
  // We reached the end of the list. The last element must be
  // registered_visitor.
  VIXL_ASSERT(*it == registered_visitor);
  visitors_.push_back(new_visitor);
}


void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
  visitors_.remove(visitor);
}


void Decoder::DecodePCRelAddressing(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
  // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
  // decode.
  VIXL_ASSERT(instr->Bit(28) == 0x1);
  VisitPCRelAddressing(instr);
}


void Decoder::DecodeBranchSystemException(const Instruction* instr) {
  VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
              (instr->Bits(27, 24) == 0x5) ||
              (instr->Bits(27, 24) == 0x6) ||
              (instr->Bits(27, 24) == 0x7) );

  switch (instr->Bits(31, 29)) {
    case 0:
    case 4: {
      VisitUnconditionalBranch(instr);
      break;
    }
    case 1:
    case 5: {
      if (instr->Bit(25) == 0) {
        VisitCompareBranch(instr);
      } else {
        VisitTestBranch(instr);
      }
      break;
    }
    case 2: {
      if (instr->Bit(25) == 0) {
        if ((instr->Bit(24) == 0x1) ||
            (instr->Mask(0x01000010) == 0x00000010)) {
          VisitUnallocated(instr);
        } else {
          VisitConditionalBranch(instr);
        }
      } else {
        VisitUnallocated(instr);
      }
      break;
    }
    case 6: {
      if (instr->Bit(25) == 0) {
        if (instr->Bit(24) == 0) {
          if ((instr->Bits(4, 2) != 0) ||
              (instr->Mask(0x00E0001D) == 0x00200001) ||
              (instr->Mask(0x00E0001D) == 0x00400001) ||
              (instr->Mask(0x00E0001E) == 0x00200002) ||
              (instr->Mask(0x00E0001E) == 0x00400002) ||
              (instr->Mask(0x00E0001C) == 0x00600000) ||
              (instr->Mask(0x00E0001C) == 0x00800000) ||
              (instr->Mask(0x00E0001F) == 0x00A00000) ||
              (instr->Mask(0x00C0001C) == 0x00C00000)) {
            VisitUnallocated(instr);
          } else {
            VisitException(instr);
          }
        } else {
          if (instr->Bits(23, 22) == 0) {
            const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
            if ((instr->Bits(21, 19) == 0x4) ||
                (masked_003FF0E0 == 0x00033000) ||
                (masked_003FF0E0 == 0x003FF020) ||
                (masked_003FF0E0 == 0x003FF060) ||
                (masked_003FF0E0 == 0x003FF0E0) ||
                (instr->Mask(0x00388000) == 0x00008000) ||
                (instr->Mask(0x0038E000) == 0x00000000) ||
                (instr->Mask(0x0039E000) == 0x00002000) ||
                (instr->Mask(0x003AE000) == 0x00002000) ||
                (instr->Mask(0x003CE000) == 0x00042000) ||
                (instr->Mask(0x003FFFC0) == 0x000320C0) ||
                (instr->Mask(0x003FF100) == 0x00032100) ||
                (instr->Mask(0x003FF200) == 0x00032200) ||
                (instr->Mask(0x003FF400) == 0x00032400) ||
                (instr->Mask(0x003FF800) == 0x00032800) ||
                (instr->Mask(0x0038F000) == 0x00005000) ||
                (instr->Mask(0x0038E000) == 0x00006000)) {
              VisitUnallocated(instr);
            } else {
              VisitSystem(instr);
            }
          } else {
            VisitUnallocated(instr);
          }
        }
      } else {
        if ((instr->Bit(24) == 0x1) ||
            (instr->Bits(20, 16) != 0x1F) ||
            (instr->Bits(15, 10) != 0) ||
            (instr->Bits(4, 0) != 0) ||
            (instr->Bits(24, 21) == 0x3) ||
            (instr->Bits(24, 22) == 0x3)) {
          VisitUnallocated(instr);
        } else {
          VisitUnconditionalBranchToRegister(instr);
        }
      }
      break;
    }
    case 3:
    case 7: {
      VisitUnallocated(instr);
      break;
    }
  }
}


void Decoder::DecodeLoadStore(const Instruction* instr) {
  VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
              (instr->Bits(27, 24) == 0x9) ||
              (instr->Bits(27, 24) == 0xC) ||
              (instr->Bits(27, 24) == 0xD) );
  // TODO(all): rearrange the tree to integrate this branch.
  if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
    DecodeNEONLoadStore(instr);
    return;
  }

  if (instr->Bit(24) == 0) {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(29) == 0) {
        if (instr->Bit(26) == 0) {
          VisitLoadStoreExclusive(instr);
        } else {
          VIXL_UNREACHABLE();
        }
      } else {
        if ((instr->Bits(31, 30) == 0x3) ||
            (instr->Mask(0xC4400000) == 0x40000000)) {
          VisitUnallocated(instr);
        } else {
          if (instr->Bit(23) == 0) {
            if (instr->Mask(0xC4400000) == 0xC0400000) {
              VisitUnallocated(instr);
            } else {
              VisitLoadStorePairNonTemporal(instr);
            }
          } else {
            VisitLoadStorePairPostIndex(instr);
          }
        }
      }
    } else {
      if (instr->Bit(29) == 0) {
        if (instr->Mask(0xC4000000) == 0xC4000000) {
          VisitUnallocated(instr);
        } else {
          VisitLoadLiteral(instr);
        }
      } else {
        if ((instr->Mask(0x84C00000) == 0x80C00000) ||
            (instr->Mask(0x44800000) == 0x44800000) ||
            (instr->Mask(0x84800000) == 0x84800000)) {
          VisitUnallocated(instr);
        } else {
          if (instr->Bit(21) == 0) {
            switch (instr->Bits(11, 10)) {
              case 0: {
                VisitLoadStoreUnscaledOffset(instr);
                break;
              }
              case 1: {
                if (instr->Mask(0xC4C00000) == 0xC0800000) {
                  VisitUnallocated(instr);
                } else {
                  VisitLoadStorePostIndex(instr);
                }
                break;
              }
              case 2: {
                // TODO: VisitLoadStoreRegisterOffsetUnpriv.
                VisitUnimplemented(instr);
                break;
              }
              case 3: {
                if (instr->Mask(0xC4C00000) == 0xC0800000) {
                  VisitUnallocated(instr);
                } else {
                  VisitLoadStorePreIndex(instr);
                }
                break;
              }
            }
          } else {
            if (instr->Bits(11, 10) == 0x2) {
              if (instr->Bit(14) == 0) {
                VisitUnallocated(instr);
              } else {
                VisitLoadStoreRegisterOffset(instr);
              }
            } else {
              VisitUnallocated(instr);
            }
          }
        }
      }
    }
  } else {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(29) == 0) {
        VisitUnallocated(instr);
      } else {
        if ((instr->Bits(31, 30) == 0x3) ||
            (instr->Mask(0xC4400000) == 0x40000000)) {
          VisitUnallocated(instr);
        } else {
          if (instr->Bit(23) == 0) {
            VisitLoadStorePairOffset(instr);
          } else {
            VisitLoadStorePairPreIndex(instr);
          }
        }
      }
    } else {
      if (instr->Bit(29) == 0) {
        VisitUnallocated(instr);
      } else {
        if ((instr->Mask(0x84C00000) == 0x80C00000) ||
            (instr->Mask(0x44800000) == 0x44800000) ||
            (instr->Mask(0x84800000) == 0x84800000)) {
          VisitUnallocated(instr);
        } else {
          VisitLoadStoreUnsignedOffset(instr);
        }
      }
    }
  }
}


void Decoder::DecodeLogical(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(27, 24) == 0x2);

  if (instr->Mask(0x80400000) == 0x00400000) {
    VisitUnallocated(instr);
  } else {
    if (instr->Bit(23) == 0) {
      VisitLogicalImmediate(instr);
    } else {
      if (instr->Bits(30, 29) == 0x1) {
        VisitUnallocated(instr);
      } else {
        VisitMoveWideImmediate(instr);
      }
    }
  }
}


void Decoder::DecodeBitfieldExtract(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(27, 24) == 0x3);

  if ((instr->Mask(0x80400000) == 0x80000000) ||
      (instr->Mask(0x80400000) == 0x00400000) ||
      (instr->Mask(0x80008000) == 0x00008000)) {
    VisitUnallocated(instr);
  } else if (instr->Bit(23) == 0) {
    if ((instr->Mask(0x80200000) == 0x00200000) ||
        (instr->Mask(0x60000000) == 0x60000000)) {
      VisitUnallocated(instr);
    } else {
      VisitBitfield(instr);
    }
  } else {
    if ((instr->Mask(0x60200000) == 0x00200000) ||
        (instr->Mask(0x60000000) != 0x00000000)) {
      VisitUnallocated(instr);
    } else {
      VisitExtract(instr);
    }
  }
}


void Decoder::DecodeAddSubImmediate(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(27, 24) == 0x1);
  if (instr->Bit(23) == 1) {
    VisitUnallocated(instr);
  } else {
    VisitAddSubImmediate(instr);
  }
}


void Decoder::DecodeDataProcessing(const Instruction* instr) {
  VIXL_ASSERT((instr->Bits(27, 24) == 0xA) ||
              (instr->Bits(27, 24) == 0xB));

  if (instr->Bit(24) == 0) {
    if (instr->Bit(28) == 0) {
      if (instr->Mask(0x80008000) == 0x00008000) {
        VisitUnallocated(instr);
      } else {
        VisitLogicalShifted(instr);
      }
    } else {
      switch (instr->Bits(23, 21)) {
        case 0: {
          if (instr->Mask(0x0000FC00) != 0) {
            VisitUnallocated(instr);
          } else {
            VisitAddSubWithCarry(instr);
          }
          break;
        }
        case 2: {
          if ((instr->Bit(29) == 0) ||
              (instr->Mask(0x00000410) != 0)) {
            VisitUnallocated(instr);
          } else {
            if (instr->Bit(11) == 0) {
              VisitConditionalCompareRegister(instr);
            } else {
              VisitConditionalCompareImmediate(instr);
            }
          }
          break;
        }
        case 4: {
          if (instr->Mask(0x20000800) != 0x00000000) {
            VisitUnallocated(instr);
          } else {
            VisitConditionalSelect(instr);
          }
          break;
        }
        case 6: {
          if (instr->Bit(29) == 0x1) {
            VisitUnallocated(instr);
            VIXL_FALLTHROUGH();
          } else {
            if (instr->Bit(30) == 0) {
              if ((instr->Bit(15) == 0x1) ||
                  (instr->Bits(15, 11) == 0) ||
                  (instr->Bits(15, 12) == 0x1) ||
                  (instr->Bits(15, 12) == 0x3) ||
                  (instr->Bits(15, 13) == 0x3) ||
                  (instr->Mask(0x8000EC00) == 0x00004C00) ||
                  (instr->Mask(0x8000E800) == 0x80004000) ||
                  (instr->Mask(0x8000E400) == 0x80004000)) {
                VisitUnallocated(instr);
              } else {
                VisitDataProcessing2Source(instr);
              }
            } else {
              if ((instr->Bit(13) == 1) ||
                  (instr->Bits(20, 16) != 0) ||
                  (instr->Bits(15, 14) != 0) ||
                  (instr->Mask(0xA01FFC00) == 0x00000C00) ||
                  (instr->Mask(0x201FF800) == 0x00001800)) {
                VisitUnallocated(instr);
              } else {
                VisitDataProcessing1Source(instr);
              }
            }
            break;
          }
        }
        case 1:
        case 3:
        case 5:
        case 7: VisitUnallocated(instr); break;
      }
    }
  } else {
    if (instr->Bit(28) == 0) {
      if (instr->Bit(21) == 0) {
        if ((instr->Bits(23, 22) == 0x3) ||
            (instr->Mask(0x80008000) == 0x00008000)) {
          VisitUnallocated(instr);
        } else {
          VisitAddSubShifted(instr);
        }
      } else {
        if ((instr->Mask(0x00C00000) != 0x00000000) ||
            (instr->Mask(0x00001400) == 0x00001400) ||
            (instr->Mask(0x00001800) == 0x00001800)) {
          VisitUnallocated(instr);
        } else {
          VisitAddSubExtended(instr);
        }
      }
    } else {
      if ((instr->Bit(30) == 0x1) ||
          (instr->Bits(30, 29) == 0x1) ||
          (instr->Mask(0xE0600000) == 0x00200000) ||
          (instr->Mask(0xE0608000) == 0x00400000) ||
          (instr->Mask(0x60608000) == 0x00408000) ||
          (instr->Mask(0x60E00000) == 0x00E00000) ||
          (instr->Mask(0x60E00000) == 0x00800000) ||
          (instr->Mask(0x60E00000) == 0x00600000)) {
        VisitUnallocated(instr);
      } else {
        VisitDataProcessing3Source(instr);
      }
    }
  }
}


void Decoder::DecodeFP(const Instruction* instr) {
  VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
              (instr->Bits(27, 24) == 0xF));
  if (instr->Bit(28) == 0) {
    DecodeNEONVectorDataProcessing(instr);
  } else {
    if (instr->Bits(31, 30) == 0x3) {
      VisitUnallocated(instr);
    } else if (instr->Bits(31, 30) == 0x1) {
      DecodeNEONScalarDataProcessing(instr);
    } else {
      if (instr->Bit(29) == 0) {
        if (instr->Bit(24) == 0) {
          if (instr->Bit(21) == 0) {
            if ((instr->Bit(23) == 1) ||
                (instr->Bit(18) == 1) ||
                (instr->Mask(0x80008000) == 0x00000000) ||
                (instr->Mask(0x000E0000) == 0x00000000) ||
                (instr->Mask(0x000E0000) == 0x000A0000) ||
                (instr->Mask(0x00160000) == 0x00000000) ||
                (instr->Mask(0x00160000) == 0x00120000)) {
              VisitUnallocated(instr);
            } else {
              VisitFPFixedPointConvert(instr);
            }
          } else {
            if (instr->Bits(15, 10) == 32) {
              VisitUnallocated(instr);
            } else if (instr->Bits(15, 10) == 0) {
              if ((instr->Bits(23, 22) == 0x3) ||
                  (instr->Mask(0x000E0000) == 0x000A0000) ||
                  (instr->Mask(0x000E0000) == 0x000C0000) ||
                  (instr->Mask(0x00160000) == 0x00120000) ||
                  (instr->Mask(0x00160000) == 0x00140000) ||
                  (instr->Mask(0x20C40000) == 0x00800000) ||
                  (instr->Mask(0x20C60000) == 0x00840000) ||
                  (instr->Mask(0xA0C60000) == 0x80060000) ||
                  (instr->Mask(0xA0C60000) == 0x00860000) ||
                  (instr->Mask(0xA0C60000) == 0x00460000) ||
                  (instr->Mask(0xA0CE0000) == 0x80860000) ||
                  (instr->Mask(0xA0CE0000) == 0x804E0000) ||
                  (instr->Mask(0xA0CE0000) == 0x000E0000) ||
                  (instr->Mask(0xA0D60000) == 0x00160000) ||
                  (instr->Mask(0xA0D60000) == 0x80560000) ||
                  (instr->Mask(0xA0D60000) == 0x80960000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPIntegerConvert(instr);
              }
            } else if (instr->Bits(14, 10) == 16) {
              const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
              if ((instr->Mask(0x80180000) != 0) ||
                  (masked_A0DF8000 == 0x00020000) ||
                  (masked_A0DF8000 == 0x00030000) ||
                  (masked_A0DF8000 == 0x00068000) ||
                  (masked_A0DF8000 == 0x00428000) ||
                  (masked_A0DF8000 == 0x00430000) ||
                  (masked_A0DF8000 == 0x00468000) ||
                  (instr->Mask(0xA0D80000) == 0x00800000) ||
                  (instr->Mask(0xA0DE0000) == 0x00C00000) ||
                  (instr->Mask(0xA0DF0000) == 0x00C30000) ||
                  (instr->Mask(0xA0DC0000) == 0x00C40000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPDataProcessing1Source(instr);
              }
            } else if (instr->Bits(13, 10) == 8) {
              if ((instr->Bits(15, 14) != 0) ||
                  (instr->Bits(2, 0) != 0) ||
                  (instr->Mask(0x80800000) != 0x00000000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPCompare(instr);
              }
            } else if (instr->Bits(12, 10) == 4) {
              if ((instr->Bits(9, 5) != 0) ||
                  (instr->Mask(0x80800000) != 0x00000000)) {
                VisitUnallocated(instr);
              } else {
                VisitFPImmediate(instr);
              }
            } else {
              if (instr->Mask(0x80800000) != 0x00000000) {
                VisitUnallocated(instr);
              } else {
                switch (instr->Bits(11, 10)) {
                  case 1: {
                    VisitFPConditionalCompare(instr);
                    break;
                  }
                  case 2: {
                    if ((instr->Bits(15, 14) == 0x3) ||
                        (instr->Mask(0x00009000) == 0x00009000) ||
                        (instr->Mask(0x0000A000) == 0x0000A000)) {
                      VisitUnallocated(instr);
                    } else {
                      VisitFPDataProcessing2Source(instr);
                    }
                    break;
                  }
                  case 3: {
                    VisitFPConditionalSelect(instr);
                    break;
                  }
                  default: VIXL_UNREACHABLE();
                }
              }
            }
          }
        } else {
          // Bit 30 == 1 has been handled earlier.
          VIXL_ASSERT(instr->Bit(30) == 0);
          if (instr->Mask(0xA0800000) != 0) {
            VisitUnallocated(instr);
          } else {
            VisitFPDataProcessing3Source(instr);
          }
        }
      } else {
        VisitUnallocated(instr);
      }
    }
  }
}


void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
  if (instr->Bit(31) == 0) {
    if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
      VisitUnallocated(instr);
      return;
    }

    if (instr->Bit(23) == 0) {
      if (instr->Bits(20, 16) == 0) {
        if (instr->Bit(24) == 0) {
          VisitNEONLoadStoreMultiStruct(instr);
        } else {
          VisitNEONLoadStoreSingleStruct(instr);
        }
      } else {
        VisitUnallocated(instr);
      }
    } else {
      if (instr->Bit(24) == 0) {
        VisitNEONLoadStoreMultiStructPostIndex(instr);
      } else {
        VisitNEONLoadStoreSingleStructPostIndex(instr);
      }
    }
  } else {
    VisitUnallocated(instr);
  }
}


void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(28, 25) == 0x7);
  if (instr->Bit(31) == 0) {
    if (instr->Bit(24) == 0) {
      if (instr->Bit(21) == 0) {
        if (instr->Bit(15) == 0) {
          if (instr->Bit(10) == 0) {
            if (instr->Bit(29) == 0) {
              if (instr->Bit(11) == 0) {
                VisitNEONTable(instr);
              } else {
                VisitNEONPerm(instr);
              }
            } else {
              VisitNEONExtract(instr);
            }
          } else {
            if (instr->Bits(23, 22) == 0) {
              VisitNEONCopy(instr);
            } else {
              VisitUnallocated(instr);
            }
          }
        } else {
          VisitUnallocated(instr);
        }
      } else {
        if (instr->Bit(10) == 0) {
          if (instr->Bit(11) == 0) {
            VisitNEON3Different(instr);
          } else {
            if (instr->Bits(18, 17) == 0) {
              if (instr->Bit(20) == 0) {
                if (instr->Bit(19) == 0) {
                  VisitNEON2RegMisc(instr);
                } else {
                  if (instr->Bits(30, 29) == 0x2) {
                    VisitCryptoAES(instr);
                  } else {
                    VisitUnallocated(instr);
                  }
                }
              } else {
                if (instr->Bit(19) == 0) {
                  VisitNEONAcrossLanes(instr);
                } else {
                  VisitUnallocated(instr);
                }
              }
            } else {
              VisitUnallocated(instr);
            }
          }
        } else {
          VisitNEON3Same(instr);
        }
      }
    } else {
      if (instr->Bit(10) == 0) {
        VisitNEONByIndexedElement(instr);
      } else {
        if (instr->Bit(23) == 0) {
          if (instr->Bits(22, 19) == 0) {
            VisitNEONModifiedImmediate(instr);
          } else {
            VisitNEONShiftImmediate(instr);
          }
        } else {
          VisitUnallocated(instr);
        }
      }
    }
  } else {
    VisitUnallocated(instr);
  }
}


void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(28, 25) == 0xF);
  if (instr->Bit(24) == 0) {
    if (instr->Bit(21) == 0) {
      if (instr->Bit(15) == 0) {
        if (instr->Bit(10) == 0) {
          if (instr->Bit(29) == 0) {
            if (instr->Bit(11) == 0) {
              VisitCrypto3RegSHA(instr);
            } else {
              VisitUnallocated(instr);
            }
          } else {
            VisitUnallocated(instr);
          }
        } else {
          if (instr->Bits(23, 22) == 0) {
            VisitNEONScalarCopy(instr);
          } else {
            VisitUnallocated(instr);
          }
        }
      } else {
        VisitUnallocated(instr);
      }
    } else {
      if (instr->Bit(10) == 0) {
        if (instr->Bit(11) == 0) {
          VisitNEONScalar3Diff(instr);
        } else {
          if (instr->Bits(18, 17) == 0) {
            if (instr->Bit(20) == 0) {
              if (instr->Bit(19) == 0) {
                VisitNEONScalar2RegMisc(instr);
              } else {
                if (instr->Bit(29) == 0) {
                  VisitCrypto2RegSHA(instr);
                } else {
                  VisitUnallocated(instr);
                }
              }
            } else {
              if (instr->Bit(19) == 0) {
                VisitNEONScalarPairwise(instr);
              } else {
                VisitUnallocated(instr);
              }
            }
          } else {
            VisitUnallocated(instr);
          }
        }
      } else {
        VisitNEONScalar3Same(instr);
      }
    }
  } else {
    if (instr->Bit(10) == 0) {
      VisitNEONScalarByIndexedElement(instr);
    } else {
      if (instr->Bit(23) == 0) {
        VisitNEONScalarShiftImmediate(instr);
      } else {
        VisitUnallocated(instr);
      }
    }
  }
}


#define DEFINE_VISITOR_CALLERS(A)                                \
  void Decoder::Visit##A(const Instruction *instr) {             \
    VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed);              \
    std::list<DecoderVisitor*>::iterator it;                     \
    for (it = visitors_.begin(); it != visitors_.end(); it++) {  \
      (*it)->Visit##A(instr);                                    \
    }                                                            \
  }
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
}  // namespace vixl
disas/libvixl/vixl/a64/decoder-a64.h
@@ -1,275 +0,0 @@
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_DECODER_A64_H_
#define VIXL_A64_DECODER_A64_H_

#include <list>

#include "vixl/globals.h"
#include "vixl/a64/instructions-a64.h"


// List macro containing all visitors needed by the decoder class.

#define VISITOR_LIST_THAT_RETURN(V) \
  V(PCRelAddressing)                \
  V(AddSubImmediate)                \
  V(LogicalImmediate)               \
  V(MoveWideImmediate)              \
  V(Bitfield)                       \
  V(Extract)                        \
  V(UnconditionalBranch)            \
  V(UnconditionalBranchToRegister)  \
  V(CompareBranch)                  \
  V(TestBranch)                     \
  V(ConditionalBranch)              \
  V(System)                         \
  V(Exception)                      \
  V(LoadStorePairPostIndex)         \
  V(LoadStorePairOffset)            \
  V(LoadStorePairPreIndex)          \
  V(LoadStorePairNonTemporal)       \
  V(LoadLiteral)                    \
  V(LoadStoreUnscaledOffset)        \
  V(LoadStorePostIndex)             \
  V(LoadStorePreIndex)              \
  V(LoadStoreRegisterOffset)        \
  V(LoadStoreUnsignedOffset)        \
  V(LoadStoreExclusive)             \
  V(LogicalShifted)                 \
  V(AddSubShifted)                  \
  V(AddSubExtended)                 \
  V(AddSubWithCarry)                \
  V(ConditionalCompareRegister)     \
  V(ConditionalCompareImmediate)    \
  V(ConditionalSelect)              \
  V(DataProcessing1Source)          \
  V(DataProcessing2Source)          \
  V(DataProcessing3Source)          \
  V(FPCompare)                      \
  V(FPConditionalCompare)           \
  V(FPConditionalSelect)            \
  V(FPImmediate)                    \
  V(FPDataProcessing1Source)        \
  V(FPDataProcessing2Source)        \
  V(FPDataProcessing3Source)        \
  V(FPIntegerConvert)               \
  V(FPFixedPointConvert)            \
  V(Crypto2RegSHA)                  \
  V(Crypto3RegSHA)                  \
  V(CryptoAES)                      \
  V(NEON2RegMisc)                   \
  V(NEON3Different)                 \
  V(NEON3Same)                      \
  V(NEONAcrossLanes)                \
  V(NEONByIndexedElement)           \
  V(NEONCopy)                       \
  V(NEONExtract)                    \
  V(NEONLoadStoreMultiStruct)       \
  V(NEONLoadStoreMultiStructPostIndex) \
  V(NEONLoadStoreSingleStruct)      \
  V(NEONLoadStoreSingleStructPostIndex) \
  V(NEONModifiedImmediate)          \
  V(NEONScalar2RegMisc)             \
  V(NEONScalar3Diff)                \
  V(NEONScalar3Same)                \
  V(NEONScalarByIndexedElement)     \
  V(NEONScalarCopy)                 \
  V(NEONScalarPairwise)             \
  V(NEONScalarShiftImmediate)       \
  V(NEONShiftImmediate)             \
  V(NEONTable)                      \
  V(NEONPerm)                       \

#define VISITOR_LIST_THAT_DONT_RETURN(V) \
  V(Unallocated)                         \
  V(Unimplemented)                       \

#define VISITOR_LIST(V)           \
  VISITOR_LIST_THAT_RETURN(V)     \
  VISITOR_LIST_THAT_DONT_RETURN(V) \

namespace vixl {

// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
 public:
  enum VisitorConstness {
    kConstVisitor,
    kNonConstVisitor
  };
  explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
      : constness_(constness) {}

  virtual ~DecoderVisitor() {}

#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
  VISITOR_LIST(DECLARE)
#undef DECLARE

  bool IsConstVisitor() const { return constness_ == kConstVisitor; }
  Instruction* MutableInstruction(const Instruction* instr) {
    VIXL_ASSERT(!IsConstVisitor());
    return const_cast<Instruction*>(instr);
  }

 private:
  const VisitorConstness constness_;
};


class Decoder {
 public:
  Decoder() {}

  // Top-level wrappers around the actual decoding function.
  void Decode(const Instruction* instr) {
    std::list<DecoderVisitor*>::iterator it;
    for (it = visitors_.begin(); it != visitors_.end(); it++) {
      VIXL_ASSERT((*it)->IsConstVisitor());
    }
    DecodeInstruction(instr);
  }
  void Decode(Instruction* instr) {
    DecodeInstruction(const_cast<const Instruction*>(instr));
  }

  // Register a new visitor class with the decoder.
  // Decode() will call the corresponding visitor method from all registered
  // visitor classes when decoding reaches the leaf node of the instruction
  // decode tree.
  // Visitors are called in order.
  // A visitor can be registered multiple times.
  //
  //   d.AppendVisitor(V1);
  //   d.AppendVisitor(V2);
  //   d.PrependVisitor(V2);
  //   d.AppendVisitor(V3);
  //
  //   d.Decode(i);
  //
  // will call in order visitor methods in V2, V1, V2, V3.
  void AppendVisitor(DecoderVisitor* visitor);
  void PrependVisitor(DecoderVisitor* visitor);
  // These helpers register `new_visitor` before or after the first instance of
  // `registered_visiter` in the list.
  // So if
  //   V1, V2, V1, V2
  // are registered in this order in the decoder, calls to
  //   d.InsertVisitorAfter(V3, V1);
  //   d.InsertVisitorBefore(V4, V2);
  // will yield the order
  //   V1, V3, V4, V2, V1, V2
  //
  // For more complex modifications of the order of registered visitors, one can
  // directly access and modify the list of visitors via the `visitors()'
  // accessor.
  void InsertVisitorBefore(DecoderVisitor* new_visitor,
                           DecoderVisitor* registered_visitor);
  void InsertVisitorAfter(DecoderVisitor* new_visitor,
                          DecoderVisitor* registered_visitor);

  // Remove all instances of a previously registered visitor class from the list
  // of visitors stored by the decoder.
  void RemoveVisitor(DecoderVisitor* visitor);

#define DECLARE(A) void Visit##A(const Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE


  std::list<DecoderVisitor*>* visitors() { return &visitors_; }

 private:
  // Decodes an instruction and calls the visitor functions registered with the
  // Decoder class.
  void DecodeInstruction(const Instruction* instr);

  // Decode the PC relative addressing instruction, and call the corresponding
|
||||
// visitors.
|
||||
// On entry, instruction bits 27:24 = 0x0.
|
||||
void DecodePCRelAddressing(const Instruction* instr);
|
||||
|
||||
// Decode the add/subtract immediate instruction, and call the correspoding
|
||||
// visitors.
|
||||
// On entry, instruction bits 27:24 = 0x1.
|
||||
void DecodeAddSubImmediate(const Instruction* instr);
|
||||
|
||||
// Decode the branch, system command, and exception generation parts of
|
||||
// the instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
|
||||
void DecodeBranchSystemException(const Instruction* instr);
|
||||
|
||||
// Decode the load and store parts of the instruction tree, and call
|
||||
// the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
|
||||
void DecodeLoadStore(const Instruction* instr);
|
||||
|
||||
// Decode the logical immediate and move wide immediate parts of the
|
||||
// instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = 0x2.
|
||||
void DecodeLogical(const Instruction* instr);
|
||||
|
||||
// Decode the bitfield and extraction parts of the instruction tree,
|
||||
// and call the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = 0x3.
|
||||
void DecodeBitfieldExtract(const Instruction* instr);
|
||||
|
||||
// Decode the data processing parts of the instruction tree, and call the
|
||||
// corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
|
||||
void DecodeDataProcessing(const Instruction* instr);
|
||||
|
||||
// Decode the floating point parts of the instruction tree, and call the
|
||||
// corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0xE, 0xF}.
|
||||
void DecodeFP(const Instruction* instr);
|
||||
|
||||
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
|
||||
// and call the corresponding visitors.
|
||||
// On entry, instruction bits 29:25 = 0x6.
|
||||
void DecodeNEONLoadStore(const Instruction* instr);
|
||||
|
||||
// Decode the Advanced SIMD (NEON) vector data processing part of the
|
||||
// instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 28:25 = 0x7.
|
||||
void DecodeNEONVectorDataProcessing(const Instruction* instr);
|
||||
|
||||
// Decode the Advanced SIMD (NEON) scalar data processing part of the
|
||||
// instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 28:25 = 0xF.
|
||||
void DecodeNEONScalarDataProcessing(const Instruction* instr);
|
||||
|
||||
private:
|
||||
// Visitors are registered in a list.
|
||||
std::list<DecoderVisitor*> visitors_;
|
||||
};
|
||||
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_A64_DECODER_A64_H_
|
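
(A minimal sketch of the visitor-ordering behaviour documented in the class
above; `v1`..`v3` are hypothetical DecoderVisitor subclasses and `instr` an
assumed instruction pointer — this snippet is illustrative only and not part
of the removed sources:)

  vixl::Decoder d;
  d.AppendVisitor(&v1);   // list: V1
  d.AppendVisitor(&v2);   // list: V1, V2
  d.PrependVisitor(&v2);  // list: V2, V1, V2
  d.AppendVisitor(&v3);   // list: V2, V1, V2, V3
  d.Decode(instr);        // visits V2, V1, V2, V3, in list order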
File diff suppressed because it is too large
@@ -1,177 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_DISASM_A64_H
#define VIXL_A64_DISASM_A64_H

#include "vixl/globals.h"
#include "vixl/utils.h"
#include "vixl/a64/instructions-a64.h"
#include "vixl/a64/decoder-a64.h"
#include "vixl/a64/assembler-a64.h"

namespace vixl {

class Disassembler: public DecoderVisitor {
 public:
  Disassembler();
  Disassembler(char* text_buffer, int buffer_size);
  virtual ~Disassembler();
  char* GetOutput();

  // Declare all Visitor functions.
#define DECLARE(A) virtual void Visit##A(const Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE

 protected:
  virtual void ProcessOutput(const Instruction* instr);

  // Default output functions. The functions below implement a default way of
  // printing elements in the disassembly. A sub-class can override these to
  // customize the disassembly output.

  // Prints the name of a register.
  // TODO: This currently doesn't allow renaming of V registers.
  virtual void AppendRegisterNameToOutput(const Instruction* instr,
                                          const CPURegister& reg);

  // Prints a PC-relative offset. This is used for example when disassembling
  // branches to immediate offsets.
  virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
                                              int64_t offset);

  // Prints an address, in the general case. It can be code or data. This is
  // used for example to print the target address of an ADR instruction.
  virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
                                                 const void* addr);

  // Prints the address of some code.
  // This is used for example to print the target address of a branch to an
  // immediate offset.
  // A sub-class can for example override this method to look up the address and
  // print an appropriate name.
  virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
                                                     const void* addr);

  // Prints the address of some data.
  // This is used for example to print the source address of a load literal
  // instruction.
  virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
                                                     const void* addr);

  // Same as the above, but for addresses that are not relative to the code
  // buffer. They are currently not used by VIXL.
  virtual void AppendAddressToOutput(const Instruction* instr,
                                     const void* addr);
  virtual void AppendCodeAddressToOutput(const Instruction* instr,
                                         const void* addr);
  virtual void AppendDataAddressToOutput(const Instruction* instr,
                                         const void* addr);

 public:
  // Get/Set the offset that should be added to code addresses when printing
  // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
  // helpers.
  // Below is an example of how a branch immediate instruction in memory at
  // address 0xb010200 would disassemble with different offsets.
  // Base address | Disassembly
  //          0x0 | 0xb010200:  b #+0xcc (addr 0xb0102cc)
  //      0x10000 | 0xb000200:  b #+0xcc (addr 0xb0002cc)
  //    0xb010200 |       0x0:  b #+0xcc (addr 0xcc)
  void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
  int64_t CodeRelativeAddress(const void* instr);

 private:
  void Format(
      const Instruction* instr, const char* mnemonic, const char* format);
  void Substitute(const Instruction* instr, const char* string);
  int SubstituteField(const Instruction* instr, const char* format);
  int SubstituteRegisterField(const Instruction* instr, const char* format);
  int SubstituteImmediateField(const Instruction* instr, const char* format);
  int SubstituteLiteralField(const Instruction* instr, const char* format);
  int SubstituteBitfieldImmediateField(
      const Instruction* instr, const char* format);
  int SubstituteShiftField(const Instruction* instr, const char* format);
  int SubstituteExtendField(const Instruction* instr, const char* format);
  int SubstituteConditionField(const Instruction* instr, const char* format);
  int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
  int SubstituteBranchTargetField(const Instruction* instr, const char* format);
  int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
  int SubstitutePrefetchField(const Instruction* instr, const char* format);
  int SubstituteBarrierField(const Instruction* instr, const char* format);
  int SubstituteSysOpField(const Instruction* instr, const char* format);
  int SubstituteCrField(const Instruction* instr, const char* format);
  bool RdIsZROrSP(const Instruction* instr) const {
    return (instr->Rd() == kZeroRegCode);
  }

  bool RnIsZROrSP(const Instruction* instr) const {
    return (instr->Rn() == kZeroRegCode);
  }

  bool RmIsZROrSP(const Instruction* instr) const {
    return (instr->Rm() == kZeroRegCode);
  }

  bool RaIsZROrSP(const Instruction* instr) const {
    return (instr->Ra() == kZeroRegCode);
  }

  bool IsMovzMovnImm(unsigned reg_size, uint64_t value);

  int64_t code_address_offset() const { return code_address_offset_; }

 protected:
  void ResetOutput();
  void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);

  void set_code_address_offset(int64_t code_address_offset) {
    code_address_offset_ = code_address_offset;
  }

  char* buffer_;
  uint32_t buffer_pos_;
  uint32_t buffer_size_;
  bool own_buffer_;

  int64_t code_address_offset_;
};


class PrintDisassembler: public Disassembler {
 public:
  explicit PrintDisassembler(FILE* stream) : stream_(stream) { }

 protected:
  virtual void ProcessOutput(const Instruction* instr);

 private:
  FILE *stream_;
};
}  // namespace vixl

#endif  // VIXL_A64_DISASM_A64_H
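
(A minimal usage sketch of how the classes above were typically wired together
before removal; `code` is an assumed pointer to AArch64 instruction bytes, and
this is illustrative only, not part of the removed file:)

  vixl::Decoder decoder;
  vixl::PrintDisassembler disasm(stdout);
  decoder.AppendVisitor(&disasm);
  // Decode one 4-byte instruction; the disassembler visitor prints it.
  decoder.Decode(vixl::Instruction::CastConst(code));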
@@ -1,622 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "vixl/a64/instructions-a64.h"
#include "vixl/a64/assembler-a64.h"

namespace vixl {


// Floating-point infinity values.
const float16 kFP16PositiveInfinity = 0x7c00;
const float16 kFP16NegativeInfinity = 0xfc00;
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
const float16 kFP16DefaultNaN = 0x7e00;


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q: return true;
      default: return false;
    }
  }
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //   N  imms    immr    size  S             R
  //   1  ssssss  rrrrrr  64    UInt(ssssss)  UInt(rrrrrr)
  //   0  0sssss  xrrrrr  32    UInt(sssss)   UInt(rrrrr)
  //   0  10ssss  xxrrrr  16    UInt(ssss)    UInt(rrrr)
  //   0  110sss  xxxrrr   8    UInt(sss)     UInt(rrr)
  //   0  1110ss  xxxxrr   4    UInt(ss)      UInt(rr)
  //   0  11110s  xxxxxr   2    UInt(s)       UInt(r)
  //   (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    if (imm_s == 0x3f) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
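
// Worked example (added for illustration; not in the original vixl source):
// with N=0, imms=0b111100, immr=0 the loop above stops at width==2
// (imms & 0x20, 0x10, 0x8 and 0x4 are all set, imms & 0x2 is clear), so
// mask=1 and S = imms & mask = 0. The element is (1 << (0 + 1)) - 1 = 0b01;
// RotateRight(0b01, 0, 2) leaves it unchanged, and RepeatBitsAcrossReg
// replicates the 2-bit element across 64 bits, yielding 0x5555555555555555.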

uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
}


float Instruction::Imm8ToFP32(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


float Instruction::ImmFP32() const {
  return Imm8ToFP32(ImmFP());
}


double Instruction::Imm8ToFP64(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


double Instruction::ImmFP64() const {
  return Imm8ToFP64(ImmFP());
}
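
// Worked example (illustrative, not in the original source): imm8 = 0x70 has
// a=0, b=1, cdefgh=110000, so bit7=0, bit6=1 and bit5_to_0=0x30. Then
// result = (31 << 25) | (0x30 << 19) = 0x3f800000, which is 1.0f. The
// "(32 - bit6)" trick produces B followed by five copies of b in a single
// subtraction.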

float Instruction::ImmNEONFP32() const {
  return Imm8ToFP32(ImmNEONabcdefgh());
}


double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}


unsigned CalcLSDataSize(LoadStoreOp op) {
  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeInBytesLog2;
    }
  }
  return size;
}


unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
  switch (op) {
    case STP_q:
    case LDP_q: return kQRegSizeInBytesLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return kXRegSizeInBytesLog2;
    default: return kWRegSizeInBytesLog2;
  }
}


int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
  switch (branch_type) {
    case UncondBranchType:
      return ImmUncondBranch_width;
    case CondBranchType:
      return ImmCondBranch_width;
    case CompareBranchType:
      return ImmCmpBranch_width;
    case TestBranchType:
      return ImmTestBranch_width;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
  int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
  return encoded_max * kInstructionSize;
}
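
// Illustrative note (not in the original source): conditional branches use a
// 19-bit signed offset field, so encoded_max is 1 << 18 and the forward range
// is (1 << 18) * 4 bytes = 1 MiB; unconditional branches use 26 bits, giving
// (1 << 25) * 4 bytes = 128 MiB.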

bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int64_t offset) {
  return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
}


const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}


int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(const Instruction* target) {
  ptrdiff_t imm21;
  if ((Mask(PCRelAddressingMask) == ADR)) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H: return kFormat8B;
    case kFormat4S: return kFormat4H;
    case kFormat2D: return kFormat2S;
    case kFormatH: return kFormatB;
    case kFormatS: return kFormatH;
    case kFormatD: return kFormatS;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B: return kFormat8H;
    case kFormat4H: return kFormat4S;
    case kFormat2S: return kFormat2D;
    case kFormatB: return kFormatH;
    case kFormatH: return kFormatS;
    case kFormatS: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatFillQ(const VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return kFormat2D;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}

VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
  switch (vform) {
    case kFormat4H: return kFormat8B;
    case kFormat8H: return kFormat16B;
    case kFormat2S: return kFormat4H;
    case kFormat4S: return kFormat8H;
    case kFormat1D: return kFormat2S;
    case kFormat2D: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}

VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B: return kFormat16B;
    case kFormat4H: return kFormat8H;
    case kFormat2S: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B: return kFormat8B;
    case kFormat8H: return kFormat4H;
    case kFormat4S: return kFormat2S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromLaneSize(int laneSize) {
  switch (laneSize) {
    case 8: return kFormatB;
    case 16: return kFormatH;
    case 32: return kFormatS;
    case 64: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB: return kBRegSize;
    case kFormatH: return kHRegSize;
    case kFormatS: return kSRegSize;
    case kFormatD: return kDRegSize;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D: return kDRegSize;
    default: return kQRegSize;
  }
}


unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}


unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 8;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 64;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}


int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 0;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 3;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormat16B: return 16;
    case kFormat8B:
    case kFormat8H: return 8;
    case kFormat4H:
    case kFormat4S: return 4;
    case kFormat2S:
    case kFormat2D: return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return 1;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int MaxLaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 2;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return false;
    default: return true;
  }
}


int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}


int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}


uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
}  // namespace vixl
@@ -1,757 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
#define VIXL_A64_INSTRUCTIONS_A64_H_

#include "vixl/globals.h"
#include "vixl/utils.h"
#include "vixl/a64/constants-a64.h"

namespace vixl {
// ISA constants. --------------------------------------------------------------

typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;

const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = INT64_C(0x8000000000000000);
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = INT32_C(0x80000000);
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask =
    ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));

// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;

// Floating-point infinity values.
extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// The default NaN values (for FPCR.DN=1).
extern const float16 kFP16DefaultNaN;
extern const float kFP32DefaultNaN;
extern const double kFP64DefaultNaN;

unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode {
  Offset,
  PreIndex,
  PostIndex
};

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  Instr InstructionBits() const {
    return *(reinterpret_cast<const Instr*>(this));
  }

  void SetInstructionBits(Instr new_instr) {
    *(reinterpret_cast<Instr*>(this)) = new_instr;
  }

  int Bit(int pos) const {
    return (InstructionBits() >> pos) & 1;
  }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const {
    return InstructionBits() & mask;
  }

#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
  int32_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    int offset =
        static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width - 1, 0, offset);
  }

  uint64_t ImmLogical() const;
  unsigned ImmNEONabcdefgh() const;
  float ImmFP32() const;
  double ImmFP64() const;
  float ImmNEONFP32() const;
  double ImmNEONFP64() const;

  unsigned SizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }

  unsigned SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  int NEONLSIndex(int access_size_shift) const {
    int64_t q = NEONQ();
    int64_t s = NEONS();
    int64_t size = NEONLSSize();
    int64_t index = (q << 3) | (s << 2) | size;
    return static_cast<int>(index >> access_size_shift);
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const {
    return Mask(TestBranchFMask) == TestBranchFixed;
  }

  bool IsImmBranch() const {
    return BranchType() != UnknownBranchType;
  }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
  static int32_t ImmBranchForwardRange(ImmBranchType branch_type);
  static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  const Instruction* ImmPCOffsetTarget() const;

  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);

  // The range of a load literal instruction, expressed as 'instr +- range'.
  // The range is actually the 'positive' range; the branch instruction can
  // target [instr - range - kInstructionSize, instr + range].
  static const int kLoadLiteralImmBitwidth = 19;
  static const int kLoadLiteralRange =
      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;

  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T LiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
    int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }

  uint32_t Literal32() const {
    uint32_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  uint64_t Literal64() const {
    uint64_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  float LiteralFP32() const {
    return rawbits_to_float(Literal32());
  }

  double LiteralFP64() const {
    return rawbits_to_double(Literal64());
  }

  const Instruction* NextInstruction() const {
    return this + kInstructionSize;
  }

  const Instruction* InstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }

  template<typename T> static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template<typename T> static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }

 private:
  int ImmBranch() const;

  static float Imm8ToFP32(uint32_t imm8);
  static double Imm8ToFP64(uint32_t imm8);

  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);
};


// Functions for handling NEON vector format information.
enum VectorFormat {
  kFormatUndefined = 0xffffffff,
  kFormat8B = NEON_8B,
  kFormat16B = NEON_16B,
  kFormat4H = NEON_4H,
  kFormat8H = NEON_8H,
  kFormat2S = NEON_2S,
  kFormat4S = NEON_4S,
  kFormat1D = NEON_1D,
  kFormat2D = NEON_2D,

  // Scalar formats. We add the scalar bit to distinguish between scalar and
  // vector enumerations; the bit is always set in the encoding of scalar ops
  // and always clear for vector ops. Although kFormatD and kFormat1D appear
  // to be the same, their meaning is subtly different. The first is a scalar
  // operation, the second a vector operation that only affects one lane.
  kFormatB = NEON_B | NEONScalar,
  kFormatH = NEON_H | NEONScalar,
  kFormatS = NEON_S | NEONScalar,
  kFormatD = NEON_D | NEONScalar
};

VectorFormat VectorFormatHalfWidth(const VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(const VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(const VectorFormat vform);
VectorFormat VectorFormatHalfLanes(const VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform);
VectorFormat VectorFormatFillQ(const VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);


enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B = 1,
  NF_16B = 2,
  NF_4H = 3,
  NF_8H = 4,
  NF_2S = 5,
  NF_4S = 6,
  NF_1D = 7,
  NF_2D = 8,
  NF_B = 9,
  NF_H = 10,
  NF_S = 11,
  NF_D = 12
};

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};
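
// Illustrative note (not in the original source): for the integer format map
// defined below, bits = {23, 22, 30} means bit 23 of the instruction becomes
// the most significant bit of the lookup index. An instruction with
// size<1:0> = 01 and Q = 1 therefore indexes entry 0b011 = 3 of the map and
// decodes as NF_8H.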
class NEONFormatDecoder {
|
||||
public:
|
||||
enum SubstitutionMode {
|
||||
kPlaceholder,
|
||||
kFormat
|
||||
};
|
||||
|
||||
// Construct a format decoder with increasingly specific format maps for each
|
||||
// subsitution. If no format map is specified, the default is the integer
|
||||
// format map.
|
||||
explicit NEONFormatDecoder(const Instruction* instr) {
|
||||
instrbits_ = instr->InstructionBits();
|
||||
SetFormatMaps(IntegerFormatMap());
|
||||
}
|
||||
NEONFormatDecoder(const Instruction* instr,
|
||||
const NEONFormatMap* format) {
|
||||
instrbits_ = instr->InstructionBits();
|
||||
SetFormatMaps(format);
|
||||
}
|
||||
NEONFormatDecoder(const Instruction* instr,
|
||||
const NEONFormatMap* format0,
|
||||
const NEONFormatMap* format1) {
|
||||
instrbits_ = instr->InstructionBits();
|
||||
SetFormatMaps(format0, format1);
|
||||
}
|
||||
NEONFormatDecoder(const Instruction* instr,
|
||||
const NEONFormatMap* format0,
|
||||
const NEONFormatMap* format1,
|
||||
const NEONFormatMap* format2) {
|
||||
instrbits_ = instr->InstructionBits();
|
||||
SetFormatMaps(format0, format1, format2);
|
||||
}
|
||||
|
||||
// Set the format mapping for all or individual substitutions.
|
||||
void SetFormatMaps(const NEONFormatMap* format0,
|
||||
const NEONFormatMap* format1 = NULL,
|
||||
const NEONFormatMap* format2 = NULL) {
|
||||
VIXL_ASSERT(format0 != NULL);
|
||||
formats_[0] = format0;
|
||||
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
|
||||
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
|
||||
}
|
||||
void SetFormatMap(unsigned index, const NEONFormatMap* format) {
|
||||
VIXL_ASSERT(index <= (sizeof(formats_) / sizeof(formats_[0])));
|
||||
VIXL_ASSERT(format != NULL);
|
||||
formats_[index] = format;
|
||||
}
|
||||
|
||||
// Substitute %s in the input string with the placeholder string for each
|
||||
// register, ie. "'B", "'H", etc.
|
||||
const char* SubstitutePlaceholders(const char* string) {
|
||||
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
|
||||
}
|
||||
|
||||
// Substitute %s in the input string with a new string based on the
|
||||
// substitution mode.
|
||||
const char* Substitute(const char* string,
|
||||
SubstitutionMode mode0 = kFormat,
|
||||
SubstitutionMode mode1 = kFormat,
|
||||
SubstitutionMode mode2 = kFormat) {
|
||||
snprintf(form_buffer_, sizeof(form_buffer_), string,
|
||||
GetSubstitute(0, mode0),
|
||||
GetSubstitute(1, mode1),
|
||||
GetSubstitute(2, mode2));
|
||||
return form_buffer_;
|
||||
}
|
||||
|
||||
// Append a "2" to a mnemonic string based of the state of the Q bit.
|
||||
const char* Mnemonic(const char* mnemonic) {
|
||||
if ((instrbits_ & NEON_Q) != 0) {
|
||||
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
|
||||
return mne_buffer_;
|
||||
}
|
||||
return mnemonic;
|
||||
}
|
||||
|
||||
VectorFormat GetVectorFormat(int format_index = 0) {
|
||||
return GetVectorFormat(formats_[format_index]);
|
||||
}
|

  VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
    static const VectorFormat vform[] = {
      kFormatUndefined,
      kFormat8B, kFormat16B, kFormat4H, kFormat8H,
      kFormat2S, kFormat4S, kFormat1D, kFormat2D,
      kFormatB, kFormatH, kFormatS, kFormatD
    };
    VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0])));
    return vform[GetNEONFormat(format_map)];
  }

  // Built in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map = {
      {23, 22, 30},
      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}
    };
    return &map;
  }

  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {
      {23, 22}, {NF_8H, NF_4S, NF_2D}
    };
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    static const NEONFormatMap map = {
      {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D}
    };
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map = {
      {11, 10, 30},
      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
    };
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {
      {30}, {NF_8B, NF_16B}
    };
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the
  // NEON vector format:
  //   xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  //   x1000->2S, x1001->4S, 10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map = {
      {19, 18, 17, 16, 30},
      {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S,
       NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D,
       NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B,
       NF_4H, NF_8H, NF_8B, NF_16B}
    };
    return &map;
  }
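
  // Worked example (added note, not in the original source): for the
  // triangular map above, an instruction with bits<19:16> = 0b0010 and
  // Q (bit 30) = 1 concatenates to index 0b00101 = 5, which the table maps
  // to NF_8H, matching the "xx101->8H" pattern in the comment.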

  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {
      {23, 22}, {NF_B, NF_H, NF_S, NF_D}
    };
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {
      {23, 22}, {NF_H, NF_S, NF_D}
    };
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {
      {22}, {NF_S, NF_D}
    };
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  //   xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {
      {19, 18, 17, 16},
      {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B,
       NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B}
    };
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode) {
    if (mode == kFormat) {
      return NEONFormatAsString(GetNEONFormat(formats_[index]));
    }
    VIXL_ASSERT(mode == kPlaceholder);
    return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
  }

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
    return format_map->map[PickBits(format_map->bits)];
  }

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format) {
    static const char* formats[] = {
      "undefined",
      "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
      "b", "h", "s", "d"
    };
    VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0])));
    return formats[format];
  }

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format) {
    VIXL_ASSERT((format == NF_B) || (format == NF_H) ||
                (format == NF_S) || (format == NF_D) ||
                (format == NF_UNDEF));
    static const char* formats[] = {
      "undefined",
      "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined",
      "'B", "'H", "'S", "'D"
    };
    return formats[format];
  }

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]) {
    uint8_t result = 0;
    for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
      if (bits[b] == 0) break;
      result <<= 1;
      result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
    }
    return result;
  }

  Instr instrbits_;
  const NEONFormatMap* formats_[3];
  char form_buffer_[64];
  char mne_buffer_[16];
};
}  // namespace vixl

#endif  // VIXL_A64_INSTRUCTIONS_A64_H_
@@ -1,113 +0,0 @@
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_CODE_BUFFER_H
#define VIXL_CODE_BUFFER_H

#include <cstring>
#include "vixl/globals.h"

namespace vixl {

class CodeBuffer {
 public:
  explicit CodeBuffer(size_t capacity = 4 * KBytes);
  CodeBuffer(void* buffer, size_t capacity);
  ~CodeBuffer();

  void Reset();

  ptrdiff_t OffsetFrom(ptrdiff_t offset) const {
    ptrdiff_t cursor_offset = cursor_ - buffer_;
    VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset));
    return cursor_offset - offset;
  }

  ptrdiff_t CursorOffset() const {
    return OffsetFrom(0);
  }

  template <typename T>
  T GetOffsetAddress(ptrdiff_t offset) const {
    VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_)));
    return reinterpret_cast<T>(buffer_ + offset);
  }

  size_t RemainingBytes() const {
    VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
    return (buffer_ + capacity_) - cursor_;
  }

  // A code buffer can emit:
  //  * 32-bit data: instruction and constant.
  //  * 64-bit data: constant.
  //  * string: debug info.
  void Emit32(uint32_t data) { Emit(data); }

  void Emit64(uint64_t data) { Emit(data); }

  void EmitString(const char* string);

  // Align to kInstructionSize.
  void Align();

  size_t capacity() const { return capacity_; }

  bool IsManaged() const { return managed_; }

  void Grow(size_t new_capacity);

  bool IsDirty() const { return dirty_; }

  void SetClean() { dirty_ = false; }

 private:
  template <typename T>
  void Emit(T value) {
    VIXL_ASSERT(RemainingBytes() >= sizeof(value));
    dirty_ = true;
    memcpy(cursor_, &value, sizeof(value));
    cursor_ += sizeof(value);
  }
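
  // Usage sketch (added note, not in the original header): Emit32(0xd503201f)
  // copies the 4-byte A64 NOP encoding to the cursor and advances it by 4;
  // Emit64() does the same for an 8-byte constant.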

  // Backing store of the buffer.
  byte* buffer_;
  // If true the backing store is allocated and deallocated by the buffer. The
  // backing store can then grow on demand. If false the backing store is
  // provided by the user and cannot be resized internally.
  bool managed_;
  // Pointer to the next location to be written.
  byte* cursor_;
  // True if there has been any write since the buffer was created or cleaned.
  bool dirty_;
  // Capacity in bytes of the backing store.
  size_t capacity_;
};

}  // namespace vixl

#endif  // VIXL_CODE_BUFFER_H
@@ -1,144 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "compiler-intrinsics.h"

namespace vixl {


int CountLeadingSignBitsFallBack(int64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
  if (value >= 0) {
    return CountLeadingZeros(value, width) - 1;
  } else {
    return CountLeadingZeros(~value, width) - 1;
  }
}


int CountLeadingZerosFallBack(uint64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
  if (value == 0) {
    return width;
  }
  int count = 0;
  value = value << (64 - width);
  if ((value & UINT64_C(0xffffffff00000000)) == 0) {
    count += 32;
    value = value << 32;
  }
  if ((value & UINT64_C(0xffff000000000000)) == 0) {
    count += 16;
    value = value << 16;
  }
  if ((value & UINT64_C(0xff00000000000000)) == 0) {
    count += 8;
    value = value << 8;
  }
  if ((value & UINT64_C(0xf000000000000000)) == 0) {
    count += 4;
    value = value << 4;
  }
  if ((value & UINT64_C(0xc000000000000000)) == 0) {
    count += 2;
    value = value << 2;
  }
  if ((value & UINT64_C(0x8000000000000000)) == 0) {
    count += 1;
  }
  count += (value == 0);
  return count;
}


int CountSetBitsFallBack(uint64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));

  // Mask out unused bits to ensure that they are not counted.
  value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));

  // Add up the set bits.
  // The algorithm works by adding pairs of bit fields together iteratively,
  // where the size of each bit field doubles each time.
  // An example for an 8-bit value:
  // Bits:  h  g  f  e  d  c  b  a
  //         \ |   \ |   \ |   \ |
  // value =  h+g   f+e   d+c   b+a
  //            \    |      \    |
  // value =   h+g+f+e     d+c+b+a
  //                 \         |
  // value =     h+g+f+e+d+c+b+a
  const uint64_t kMasks[] = {
    UINT64_C(0x5555555555555555),
    UINT64_C(0x3333333333333333),
    UINT64_C(0x0f0f0f0f0f0f0f0f),
    UINT64_C(0x00ff00ff00ff00ff),
    UINT64_C(0x0000ffff0000ffff),
    UINT64_C(0x00000000ffffffff),
  };

  for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
    int shift = 1 << i;
    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
  }

  return static_cast<int>(value);
}
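

// Worked trace (added note, not part of the original file): for the 8-bit
// value 0b10110100, the first pass above sums adjacent bits into two-bit
// fields (01 10 01 00), the second sums those into nibbles (0011 0001), and
// the third adds the nibbles to give 00000100, i.e. 4 set bits, so
// CountSetBitsFallBack(0xb4, 8) returns 4.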


int CountTrailingZerosFallBack(uint64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
  int count = 0;
  value = value << (64 - width);
  if ((value & UINT64_C(0xffffffff)) == 0) {
    count += 32;
    value = value >> 32;
  }
  if ((value & 0xffff) == 0) {
    count += 16;
    value = value >> 16;
  }
  if ((value & 0xff) == 0) {
    count += 8;
    value = value >> 8;
  }
  if ((value & 0xf) == 0) {
    count += 4;
    value = value >> 4;
  }
  if ((value & 0x3) == 0) {
    count += 2;
    value = value >> 2;
  }
  if ((value & 0x1) == 0) {
    count += 1;
  }
  count += (value == 0);
  return count - (64 - width);
}


}  // namespace vixl
@@ -1,155 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.


#ifndef VIXL_COMPILER_INTRINSICS_H
#define VIXL_COMPILER_INTRINSICS_H

#include "globals.h"

namespace vixl {

// Helper to check whether the version of GCC used is at least the specified
// requirement.
#define MAJOR 1000000
#define MINOR 1000
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
    ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \
     ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
    ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \
     ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
#else
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
#endif
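
// Worked example (added note, not in the original source): GCC 4.9.2 encodes
// as 4 * MAJOR + 9 * MINOR + 2 = 4009002, so GCC_VERSION_OR_NEWER(4, 7, 0)
// compares 4009002 >= 4007000 and evaluates to 1.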


#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)

#define COMPILER_HAS_BUILTIN_CLRSB    (__has_builtin(__builtin_clrsb))
#define COMPILER_HAS_BUILTIN_CLZ      (__has_builtin(__builtin_clz))
#define COMPILER_HAS_BUILTIN_CTZ      (__has_builtin(__builtin_ctz))
#define COMPILER_HAS_BUILTIN_FFS      (__has_builtin(__builtin_ffs))
#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))

#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// The documentation for these builtins is available at:
// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html

#define COMPILER_HAS_BUILTIN_CLRSB    (GCC_VERSION_OR_NEWER(4, 7, 0))
#define COMPILER_HAS_BUILTIN_CLZ      (GCC_VERSION_OR_NEWER(3, 4, 0))
#define COMPILER_HAS_BUILTIN_CTZ      (GCC_VERSION_OR_NEWER(3, 4, 0))
#define COMPILER_HAS_BUILTIN_FFS      (GCC_VERSION_OR_NEWER(3, 4, 0))
#define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))

#else
// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
// implemented C++ methods.

#define COMPILER_HAS_BUILTIN_BSWAP    false
#define COMPILER_HAS_BUILTIN_CLRSB    false
#define COMPILER_HAS_BUILTIN_CLZ      false
#define COMPILER_HAS_BUILTIN_CTZ      false
#define COMPILER_HAS_BUILTIN_FFS      false
#define COMPILER_HAS_BUILTIN_POPCOUNT false

#endif


template<typename V>
inline bool IsPowerOf2(V value) {
  return (value != 0) && ((value & (value - 1)) == 0);
}


// Declaration of fallback functions.
int CountLeadingSignBitsFallBack(int64_t value, int width);
int CountLeadingZerosFallBack(uint64_t value, int width);
int CountSetBitsFallBack(uint64_t value, int width);
int CountTrailingZerosFallBack(uint64_t value, int width);


// Implementation of intrinsics functions.
// TODO: The implementations could be improved for sizes different from 32-bit
// and 64-bit: we could mask the values and call the appropriate builtin.

template<typename V>
inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLRSB
  if (width == 32) {
    return __builtin_clrsb(value);
  } else if (width == 64) {
    return __builtin_clrsbll(value);
  }
#endif
  return CountLeadingSignBitsFallBack(value, width);
}


template<typename V>
inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLZ
  if (width == 32) {
    return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
  } else if (width == 64) {
    return (value == 0) ? 64 : __builtin_clzll(value);
  }
#endif
  return CountLeadingZerosFallBack(value, width);
}


template<typename V>
inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_POPCOUNT
  if (width == 32) {
    return __builtin_popcount(static_cast<unsigned>(value));
  } else if (width == 64) {
    return __builtin_popcountll(value);
  }
#endif
  return CountSetBitsFallBack(value, width);
}


template<typename V>
inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CTZ
  if (width == 32) {
    return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
  } else if (width == 64) {
    return (value == 0) ? 64 : __builtin_ctzll(value);
  }
#endif
  return CountTrailingZerosFallBack(value, width);
}

}  // namespace vixl

#endif  // VIXL_COMPILER_INTRINSICS_H
@@ -1,155 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_GLOBALS_H
#define VIXL_GLOBALS_H

// Get standard C99 macros for integer types.
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

extern "C" {
#include <inttypes.h>
#include <stdint.h>
}

#include <cassert>
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

#include "vixl/platform.h"


typedef uint8_t byte;

// Type for half-precision (16 bit) floating point numbers.
typedef uint16_t float16;

const int KBytes = 1024;
const int MBytes = 1024 * KBytes;

#define VIXL_ABORT() \
    do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false)
#ifdef VIXL_DEBUG
  #define VIXL_ASSERT(condition) assert(condition)
  #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
  #define VIXL_UNIMPLEMENTED() \
    do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false)
  #define VIXL_UNREACHABLE() \
    do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false)
#else
  #define VIXL_ASSERT(condition) ((void) 0)
  #define VIXL_CHECK(condition) assert(condition)
  #define VIXL_UNIMPLEMENTED() ((void) 0)
  #define VIXL_UNREACHABLE() ((void) 0)
#endif
// This is not as powerful as template based assertions, but it is simple.
// It assumes that the descriptions are unique. If this starts being a problem,
// we can switch to a different implementation.
#define VIXL_CONCAT(a, b) a##b
#define VIXL_STATIC_ASSERT_LINE(line, condition) \
  typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
  __attribute__((unused))
#define VIXL_STATIC_ASSERT(condition) \
    VIXL_STATIC_ASSERT_LINE(__LINE__, condition)
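// Example use (added for exposition, not in the original header): the typedef
// trick above makes the build fail on a false condition by declaring a char
// array of size -1.
VIXL_STATIC_ASSERT(sizeof(uint64_t) == 8);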

template <typename T1>
inline void USE(T1) {}

template <typename T1, typename T2>
inline void USE(T1, T2) {}

template <typename T1, typename T2, typename T3>
inline void USE(T1, T2, T3) {}

template <typename T1, typename T2, typename T3, typename T4>
inline void USE(T1, T2, T3, T4) {}

#define VIXL_ALIGNMENT_EXCEPTION() \
    do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0)

// The clang::fallthrough attribute is used along with the
// -Wimplicit-fallthrough argument to annotate intentional fall-through
// between switch labels. For more information please refer to:
// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
#ifndef __has_warning
  #define __has_warning(x) 0
#endif

// Fallthrough annotation for Clang and C++11(201103L).
#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
  #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT
// Fallthrough annotation for GCC >= 7.
#elif __GNUC__ >= 7
  #define VIXL_FALLTHROUGH() __attribute__((fallthrough))
#else
  #define VIXL_FALLTHROUGH() do {} while (0)
#endif

#if __cplusplus >= 201103L
  #define VIXL_NO_RETURN [[noreturn]] //NOLINT
#else
  #define VIXL_NO_RETURN __attribute__((noreturn))
#endif

// Some functions might only be marked as "noreturn" for the DEBUG build. This
// macro should be used for such cases (for more details see what
// VIXL_UNREACHABLE expands to).
#ifdef VIXL_DEBUG
  #define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN
#else
  #define VIXL_DEBUG_NO_RETURN
#endif

#ifdef VIXL_INCLUDE_SIMULATOR
#ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
  #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE 1
#endif
#else
#ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
  #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE 0
#endif
#if VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
  #warning "Generating Simulator instructions without Simulator support."
#endif
#endif

#ifdef USE_SIMULATOR
  #error "Please see the release notes for USE_SIMULATOR."
#endif

#endif  // VIXL_GLOBALS_H
@@ -1,775 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_

#include <cstring>

#include <algorithm>
#include <vector>

#include "vixl/globals.h"

namespace vixl {

// We define a custom data structure template and its iterator as `std`
// containers do not fit the performance requirements for some of our use
// cases.
//
// The structure behaves like an iterable unordered set with special properties
// and restrictions. "InvalSet" stands for "Invalidatable Set".
//
// Restrictions and requirements:
// - Adding an element already present in the set is illegal. In debug mode,
//   this is checked at insertion time.
// - The templated class `ElementType` must provide comparison operators so
//   that `std::sort()` can be used.
// - A key must be available to represent invalid elements.
// - Elements with an invalid key must compare higher or equal to any other
//   element.
//
// Use cases and performance considerations:
// Our use cases present two specificities that allow us to design this
// structure to provide fast insertion *and* fast search and deletion
// operations:
// - Elements are (generally) inserted in order (sorted according to their
//   key).
// - A key is available to mark elements as invalid (deleted).
// The backing `std::vector` allows for fast insertions. When searching for an
// element we ensure the elements are sorted (this is generally the case) and
// perform a binary search. When deleting an element we do not free the
// associated memory immediately. Instead, an element to be deleted is marked
// with the 'invalid' key. Other methods of the container take care of ignoring
// entries marked as invalid.
// To avoid the overhead of the `std::vector` container when only few entries
// are used, a number of elements are preallocated.

// 'ElementType' and 'KeyType' are respectively the types of the elements and
// their key. The structure only reclaims memory when safe to do so, if the
// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
// greater than `<total number of elements> / RECLAIM_FACTOR`.
#define TEMPLATE_INVALSET_P_DECL \
  class ElementType, \
  unsigned N_PREALLOCATED_ELEMENTS, \
  class KeyType, \
  KeyType INVALID_KEY, \
  size_t RECLAIM_FROM, \
  unsigned RECLAIM_FACTOR

#define TEMPLATE_INVALSET_P_DEF \
  ElementType, N_PREALLOCATED_ELEMENTS, \
  KeyType, INVALID_KEY, RECLAIM_FROM, RECLAIM_FACTOR

template<class S> class InvalSetIterator;  // Forward declaration.

template<TEMPLATE_INVALSET_P_DECL> class InvalSet {
 public:
  InvalSet();
  ~InvalSet();

  static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
  static const KeyType kInvalidKey = INVALID_KEY;

  // It is illegal to insert an element already present in the set.
  void insert(const ElementType& element);

  // Looks for the specified element in the set and - if found - deletes it.
  void erase(const ElementType& element);

  // This indicates the number of (valid) elements stored in this set.
  size_t size() const;

  // Returns true if no elements are stored in the set.
  // Note that this does not mean that the backing storage is empty: it can
  // still contain invalid elements.
  bool empty() const;

  void clear();

  const ElementType min_element();

  // This returns the key of the minimum element in the set.
  KeyType min_element_key();

  static bool IsValid(const ElementType& element);
  static KeyType Key(const ElementType& element);
  static void SetKey(ElementType* element, KeyType key);

 protected:
  // Returns a pointer to the element in vector_ if it was found, or NULL
  // otherwise.
  ElementType* Search(const ElementType& element);

  // The argument *must* point to an element stored in *this* set.
  // This function is not allowed to move elements in the backing vector
  // storage.
  void EraseInternal(ElementType* element);

  // The elements in the range searched must be sorted.
  ElementType* BinarySearch(const ElementType& element,
                            ElementType* start,
                            ElementType* end) const;

  // Sort the elements.
  enum SortType {
    // The 'hard' version guarantees that invalid elements are moved to the
    // end of the container.
    kHardSort,
    // The 'soft' version only guarantees that the elements will be sorted.
    // Invalid elements may still be present anywhere in the set.
    kSoftSort
  };
  void Sort(SortType sort_type);

  // Delete the elements that have an invalid key. The complexity is linear
  // with the size of the vector.
  void Clean();

  const ElementType Front() const;
  const ElementType Back() const;

  // Delete invalid trailing elements and return the last valid element in the
  // set.
  const ElementType CleanBack();

  // Returns a pointer to the start or end of the backing storage.
  const ElementType* StorageBegin() const;
  const ElementType* StorageEnd() const;
  ElementType* StorageBegin();
  ElementType* StorageEnd();

  // Returns the index of the element within the backing storage. The element
  // must belong to the backing storage.
  size_t ElementIndex(const ElementType* element) const;

  // Returns the element at the specified index in the backing storage.
  const ElementType* ElementAt(size_t index) const;
  ElementType* ElementAt(size_t index);

  static const ElementType* FirstValidElement(const ElementType* from,
                                              const ElementType* end);

  void CacheMinElement();
  const ElementType CachedMinElement() const;

  bool ShouldReclaimMemory() const;
  void ReclaimMemory();

  bool IsUsingVector() const { return vector_ != NULL; }
  void set_sorted(bool sorted) { sorted_ = sorted; }

  // We cache some data commonly required by users to improve performance.
  // We cannot cache pointers to elements as we do not control the backing
  // storage.
  bool valid_cached_min_;
  size_t cached_min_index_;  // Valid iff `valid_cached_min_` is true.
  KeyType cached_min_key_;   // Valid iff `valid_cached_min_` is true.

  // Indicates whether the elements are sorted.
  bool sorted_;

  // This represents the number of (valid) elements in this set.
  size_t size_;

  // The backing storage is either the array of preallocated elements or the
  // vector. The structure starts by using the preallocated elements, and
  // transitions (permanently) to using the vector once more than
  // kNPreallocatedElements are used.
  // Elements are only invalidated when using the vector. The preallocated
  // storage always only contains valid elements.
  ElementType preallocated_[kNPreallocatedElements];
  std::vector<ElementType>* vector_;

#ifdef VIXL_DEBUG
  // Iterators acquire and release this monitor. While a set is acquired,
  // certain operations are illegal to ensure that the iterator will
  // correctly iterate over the elements in the set.
  int monitor_;
  int monitor() const { return monitor_; }
  void Acquire() { monitor_++; }
  void Release() {
    monitor_--;
    VIXL_ASSERT(monitor_ >= 0);
  }
#endif

  friend class InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> >;
  typedef ElementType _ElementType;
  typedef KeyType _KeyType;
};
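
// Illustrative instantiation (added for exposition, not part of the original
// header): a set of int64_t elements whose value doubles as the key, with 8
// preallocated slots, -1 reserved as the invalid key, and memory reclaimed
// once more than 8 invalidated elements make up over a quarter of the backing
// storage. A real user must also provide the static Key(), SetKey() and
// comparison behaviour declared above for its element type.
typedef InvalSet<int64_t, 8, int64_t, -1, 8, 4> ExampleInt64InvalSet;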


template<class S> class InvalSetIterator {
 private:
  // Redefine types to mirror the associated set types.
  typedef typename S::_ElementType ElementType;
  typedef typename S::_KeyType KeyType;

 public:
  explicit InvalSetIterator(S* inval_set);
  ~InvalSetIterator();

  ElementType* Current() const;
  void Advance();
  bool Done() const;

  // Mark this iterator as 'done'.
  void Finish();

  // Delete the current element and advance the iterator to point to the next
  // element.
  void DeleteCurrentAndAdvance();

  static bool IsValid(const ElementType& element);
  static KeyType Key(const ElementType& element);

 protected:
  void MoveToValidElement();

  // Indicates if the iterator is looking at the vector or at the preallocated
  // elements.
  const bool using_vector_;
  // Used when looking at the preallocated elements, or in debug mode when
  // using the vector to track how many times the iterator has advanced.
  size_t index_;
  typename std::vector<ElementType>::iterator iterator_;
  S* inval_set_;
};


template<TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
    : valid_cached_min_(false),
      sorted_(true), size_(0), vector_(NULL) {
#ifdef VIXL_DEBUG
  monitor_ = 0;
#endif
}


template<TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet() {
  VIXL_ASSERT(monitor_ == 0);
  delete vector_;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::insert(const ElementType& element) {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(IsValid(element));
  VIXL_ASSERT(Search(element) == NULL);
  set_sorted(empty() || (sorted_ && (element > CleanBack())));
  if (IsUsingVector()) {
    vector_->push_back(element);
  } else {
    if (size_ < kNPreallocatedElements) {
      preallocated_[size_] = element;
    } else {
      // Transition to using the vector.
      vector_ = new std::vector<ElementType>(preallocated_,
                                             preallocated_ + size_);
      vector_->push_back(element);
    }
  }
  size_++;

  if (valid_cached_min_ && (element < min_element())) {
    cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1;
    cached_min_key_ = Key(element);
    valid_cached_min_ = true;
  }

  if (ShouldReclaimMemory()) {
    ReclaimMemory();
  }
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::erase(const ElementType& element) {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(IsValid(element));
  ElementType* local_element = Search(element);
  if (local_element != NULL) {
    EraseInternal(local_element);
  }
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::Search(
    const ElementType& element) {
  VIXL_ASSERT(monitor() == 0);
  if (empty()) {
    return NULL;
  }
  if (ShouldReclaimMemory()) {
    ReclaimMemory();
  }
  if (!sorted_) {
    Sort(kHardSort);
  }
  if (!valid_cached_min_) {
    CacheMinElement();
  }
  return BinarySearch(element, ElementAt(cached_min_index_), StorageEnd());
}


template<TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::size() const {
  return size_;
}


template<TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::empty() const {
  return size_ == 0;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::clear() {
  VIXL_ASSERT(monitor() == 0);
  size_ = 0;
  if (IsUsingVector()) {
    vector_->clear();
  }
  set_sorted(true);
  valid_cached_min_ = false;
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::min_element() {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(!empty());
  CacheMinElement();
  return *ElementAt(cached_min_index_);
}


template<TEMPLATE_INVALSET_P_DECL>
KeyType InvalSet<TEMPLATE_INVALSET_P_DEF>::min_element_key() {
  VIXL_ASSERT(monitor() == 0);
  if (valid_cached_min_) {
    return cached_min_key_;
  } else {
    return Key(min_element());
  }
}


template<TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::IsValid(const ElementType& element) {
  return Key(element) != kInvalidKey;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::EraseInternal(ElementType* element) {
  // Note that this function must be safe even while an iterator has acquired
  // this set.
  VIXL_ASSERT(element != NULL);
  size_t deleted_index = ElementIndex(element);
  if (IsUsingVector()) {
    VIXL_ASSERT((&(vector_->front()) <= element) &&
                (element <= &(vector_->back())));
    SetKey(element, kInvalidKey);
  } else {
    VIXL_ASSERT((preallocated_ <= element) &&
                (element < (preallocated_ + kNPreallocatedElements)));
    ElementType* end = preallocated_ + kNPreallocatedElements;
    size_t copy_size = sizeof(*element) * (end - element - 1);
    memmove(element, element + 1, copy_size);
  }
  size_--;

  if (valid_cached_min_ &&
      (deleted_index == cached_min_index_)) {
    if (sorted_ && !empty()) {
      const ElementType* min = FirstValidElement(element, StorageEnd());
      cached_min_index_ = ElementIndex(min);
      cached_min_key_ = Key(*min);
      valid_cached_min_ = true;
    } else {
      valid_cached_min_ = false;
    }
  }
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::BinarySearch(
    const ElementType& element, ElementType* start, ElementType* end) const {
  if (start == end) {
    return NULL;
  }
  VIXL_ASSERT(sorted_);
  VIXL_ASSERT(start < end);
  VIXL_ASSERT(!empty());

  // Perform a binary search through the elements while ignoring invalid
  // elements.
  ElementType* elements = start;
  size_t low = 0;
  size_t high = (end - start) - 1;
  while (low < high) {
    // Find valid bounds.
    while (!IsValid(elements[low]) && (low < high)) ++low;
    while (!IsValid(elements[high]) && (low < high)) --high;
    VIXL_ASSERT(low <= high);
    // Avoid overflow when computing the middle index.
    size_t middle = low / 2 + high / 2 + (low & high & 1);
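    // Worked example (added note, not in the original source): for low = 3
    // and high = 5 this gives 1 + 2 + 1 = 4, and for low = 3 and high = 4 it
    // gives 1 + 2 + 0 = 3, i.e. floor((low + high) / 2) without ever forming
    // low + high, which could overflow size_t.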
    if ((middle == low) || (middle == high)) {
      break;
    }
    while (!IsValid(elements[middle]) && (middle < high - 1)) ++middle;
    while (!IsValid(elements[middle]) && (low + 1 < middle)) --middle;
    if (!IsValid(elements[middle])) {
      break;
    }
    if (elements[middle] < element) {
      low = middle;
    } else {
      high = middle;
    }
  }

  if (elements[low] == element) return &elements[low];
  if (elements[high] == element) return &elements[high];
  return NULL;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Sort(SortType sort_type) {
  VIXL_ASSERT(monitor() == 0);
  if (sort_type == kSoftSort) {
    if (sorted_) {
      return;
    }
  }
  if (empty()) {
    return;
  }

  Clean();
  std::sort(StorageBegin(), StorageEnd());

  set_sorted(true);
  cached_min_index_ = 0;
  cached_min_key_ = Key(Front());
  valid_cached_min_ = true;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Clean() {
  VIXL_ASSERT(monitor() == 0);
  if (empty() || !IsUsingVector()) {
    return;
  }
  // Manually iterate through the vector storage to discard invalid elements.
  ElementType* start = &(vector_->front());
  ElementType* end = start + vector_->size();
  ElementType* c = start;
  ElementType* first_invalid;
  ElementType* first_valid;
  ElementType* next_invalid;

  while (c < end && IsValid(*c)) { c++; }
  first_invalid = c;

  while (c < end) {
    while (c < end && !IsValid(*c)) { c++; }
    first_valid = c;
    while (c < end && IsValid(*c)) { c++; }
    next_invalid = c;

    ptrdiff_t n_moved_elements = (next_invalid - first_valid);
    memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c));
    first_invalid = first_invalid + n_moved_elements;
    c = next_invalid;
  }

  // Delete the trailing invalid elements.
  vector_->erase(vector_->begin() + (first_invalid - start), vector_->end());
  VIXL_ASSERT(vector_->size() == size_);

  if (sorted_) {
    valid_cached_min_ = true;
    cached_min_index_ = 0;
    cached_min_key_ = Key(*ElementAt(0));
  } else {
    valid_cached_min_ = false;
  }
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Front() const {
  VIXL_ASSERT(!empty());
  return IsUsingVector() ? vector_->front() : preallocated_[0];
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Back() const {
  VIXL_ASSERT(!empty());
  return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1];
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::CleanBack() {
  VIXL_ASSERT(monitor() == 0);
  if (IsUsingVector()) {
    // Delete the invalid trailing elements.
    typename std::vector<ElementType>::reverse_iterator it = vector_->rbegin();
    while (!IsValid(*it)) {
      it++;
    }
    vector_->erase(it.base(), vector_->end());
  }
  return Back();
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() const {
  return IsUsingVector() ? &(vector_->front()) : preallocated_;
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() const {
  return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() {
  return IsUsingVector() ? &(vector_->front()) : preallocated_;
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() {
  return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}


template<TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementIndex(
    const ElementType* element) const {
  VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd()));
  return element - StorageBegin();
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementAt(
    size_t index) const {
  VIXL_ASSERT(
      (IsUsingVector() && (index < vector_->size())) || (index < size_));
  return StorageBegin() + index;
}

template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementAt(size_t index) {
  VIXL_ASSERT(
      (IsUsingVector() && (index < vector_->size())) || (index < size_));
  return StorageBegin() + index;
}

template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::FirstValidElement(
    const ElementType* from, const ElementType* end) {
  while ((from < end) && !IsValid(*from)) {
    from++;
  }
  return from;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::CacheMinElement() {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(!empty());

  if (valid_cached_min_) {
    return;
  }

  if (sorted_) {
    const ElementType* min = FirstValidElement(StorageBegin(), StorageEnd());
    cached_min_index_ = ElementIndex(min);
    cached_min_key_ = Key(*min);
    valid_cached_min_ = true;
  } else {
    Sort(kHardSort);
  }
  VIXL_ASSERT(valid_cached_min_);
}


template<TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::ShouldReclaimMemory() const {
  if (!IsUsingVector()) {
    return false;
  }
  size_t n_invalid_elements = vector_->size() - size_;
  return (n_invalid_elements > RECLAIM_FROM) &&
         (n_invalid_elements > vector_->size() / RECLAIM_FACTOR);
}
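
// Worked example (added note, not in the original source): with
// RECLAIM_FROM = 8 and RECLAIM_FACTOR = 4, a backing vector holding 40
// entries of which 12 are invalid reclaims memory (12 > 8 and 12 > 40 / 4),
// while one holding 40 entries with 9 invalid does not (9 <= 40 / 4 = 10).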


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::ReclaimMemory() {
  VIXL_ASSERT(monitor() == 0);
  Clean();
}


template<class S>
InvalSetIterator<S>::InvalSetIterator(S* inval_set)
    : using_vector_((inval_set != NULL) && inval_set->IsUsingVector()),
      index_(0),
      inval_set_(inval_set) {
  if (inval_set != NULL) {
    inval_set->Sort(S::kSoftSort);
#ifdef VIXL_DEBUG
    inval_set->Acquire();
#endif
    if (using_vector_) {
      iterator_ = typename std::vector<ElementType>::iterator(
          inval_set_->vector_->begin());
    }
    MoveToValidElement();
  }
}


template<class S>
InvalSetIterator<S>::~InvalSetIterator() {
#ifdef VIXL_DEBUG
  if (inval_set_ != NULL) {
    inval_set_->Release();
  }
#endif
}


template<class S>
typename S::_ElementType* InvalSetIterator<S>::Current() const {
  VIXL_ASSERT(!Done());
  if (using_vector_) {
    return &(*iterator_);
  } else {
    return &(inval_set_->preallocated_[index_]);
  }
}


template<class S>
void InvalSetIterator<S>::Advance() {
  VIXL_ASSERT(!Done());
  if (using_vector_) {
    iterator_++;
#ifdef VIXL_DEBUG
    index_++;
#endif
    MoveToValidElement();
  } else {
    index_++;
  }
}


template<class S>
bool InvalSetIterator<S>::Done() const {
  if (using_vector_) {
    bool done = (iterator_ == inval_set_->vector_->end());
    VIXL_ASSERT(done == (index_ == inval_set_->size()));
    return done;
  } else {
    return index_ == inval_set_->size();
  }
}


template<class S>
void InvalSetIterator<S>::Finish() {
  VIXL_ASSERT(inval_set_->sorted_);
  if (using_vector_) {
    iterator_ = inval_set_->vector_->end();
  }
  index_ = inval_set_->size();
}


template<class S>
void InvalSetIterator<S>::DeleteCurrentAndAdvance() {
  if (using_vector_) {
    inval_set_->EraseInternal(&(*iterator_));
    MoveToValidElement();
  } else {
    inval_set_->EraseInternal(inval_set_->preallocated_ + index_);
  }
}


template<class S>
bool InvalSetIterator<S>::IsValid(const ElementType& element) {
  return S::IsValid(element);
}


template<class S>
typename S::_KeyType InvalSetIterator<S>::Key(const ElementType& element) {
  return S::Key(element);
}


template<class S>
void InvalSetIterator<S>::MoveToValidElement() {
  if (using_vector_) {
    while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) {
      iterator_++;
    }
  } else {
    VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0]));
    // Nothing to do.
  }
}

#undef TEMPLATE_INVALSET_P_DECL
#undef TEMPLATE_INVALSET_P_DEF

}  // namespace vixl

#endif  // VIXL_INVALSET_H_
@ -1,39 +0,0 @@
|
||||
// Copyright 2014, ARM Limited
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef PLATFORM_H
|
||||
#define PLATFORM_H
|
||||
|
||||
// Define platform specific functionalities.
|
||||
extern "C" {
|
||||
#include <signal.h>
|
||||
}
|
||||
|
||||
namespace vixl {
|
||||
inline void HostBreakpoint() { raise(SIGINT); }
|
||||
} // namespace vixl
|
||||
|
||||
#endif
@@ -1,142 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "vixl/utils.h"
#include <cstdio>

namespace vixl {

uint32_t float_to_rawbits(float value) {
  uint32_t bits = 0;
  memcpy(&bits, &value, 4);
  return bits;
}


uint64_t double_to_rawbits(double value) {
  uint64_t bits = 0;
  memcpy(&bits, &value, 8);
  return bits;
}


float rawbits_to_float(uint32_t bits) {
  float value = 0.0;
  memcpy(&value, &bits, 4);
  return value;
}


double rawbits_to_double(uint64_t bits) {
  double value = 0.0;
  memcpy(&value, &bits, 8);
  return value;
}
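
These four helpers bit-cast between floating-point values and their integer representations with memcpy, which copies the object representation without violating strict-aliasing rules (unlike a pointer cast would). A standalone sketch of the same idiom:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        float f = 1.0f;
        uint32_t bits;

        /* Well-defined bit-cast: copy the bytes, don't alias the object. */
        memcpy(&bits, &f, sizeof(bits));
        printf("0x%08" PRIx32 "\n", bits); /* 0x3f800000: sign 0, exp 127 */
        return 0;
    }
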

uint32_t float_sign(float val) {
  uint32_t rawbits = float_to_rawbits(val);
  return unsigned_bitextract_32(31, 31, rawbits);
}


uint32_t float_exp(float val) {
  uint32_t rawbits = float_to_rawbits(val);
  return unsigned_bitextract_32(30, 23, rawbits);
}


uint32_t float_mantissa(float val) {
  uint32_t rawbits = float_to_rawbits(val);
  return unsigned_bitextract_32(22, 0, rawbits);
}


uint32_t double_sign(double val) {
  uint64_t rawbits = double_to_rawbits(val);
  return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, rawbits));
}


uint32_t double_exp(double val) {
  uint64_t rawbits = double_to_rawbits(val);
  return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, rawbits));
}


uint64_t double_mantissa(double val) {
  uint64_t rawbits = double_to_rawbits(val);
  return unsigned_bitextract_64(51, 0, rawbits);
}


float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
  return rawbits_to_float(bits);
}


double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
  uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
  return rawbits_to_double(bits);
}


int float16classify(float16 value) {
  uint16_t exponent_max = (1 << 5) - 1;
  uint16_t exponent_mask = exponent_max << 10;
  uint16_t mantissa_mask = (1 << 10) - 1;

  uint16_t exponent = (value & exponent_mask) >> 10;
  uint16_t mantissa = value & mantissa_mask;
  if (exponent == 0) {
    if (mantissa == 0) {
      return FP_ZERO;
    }
    return FP_SUBNORMAL;
  } else if (exponent == exponent_max) {
    if (mantissa == 0) {
      return FP_INFINITE;
    }
    return FP_NAN;
  }
  return FP_NORMAL;
}
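
float16classify() mirrors fpclassify() for IEEE 754 binary16 (1 sign bit, 5 exponent bits, 10 mantissa bits). A self-contained copy of the same logic on uint16_t, exercised with well-known half-precision encodings:

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    static int classify16(uint16_t v)
    {
        uint16_t exp = (v >> 10) & 0x1f;
        uint16_t man = v & 0x3ff;

        if (exp == 0) {
            return man == 0 ? FP_ZERO : FP_SUBNORMAL;
        }
        if (exp == 0x1f) {
            return man == 0 ? FP_INFINITE : FP_NAN;
        }
        return FP_NORMAL;
    }

    int main(void)
    {
        assert(classify16(0x0000) == FP_ZERO);      /* +0.0 */
        assert(classify16(0x0001) == FP_SUBNORMAL); /* smallest subnormal */
        assert(classify16(0x3c00) == FP_NORMAL);    /* 1.0 */
        assert(classify16(0x7c00) == FP_INFINITE);  /* +infinity */
        assert(classify16(0x7e00) == FP_NAN);       /* a quiet NaN */
        return 0;
    }
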

unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}

}  // namespace vixl
@@ -1,286 +0,0 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <cmath>
#include <cstring>
#include "vixl/globals.h"
#include "vixl/compiler-intrinsics.h"

namespace vixl {

// Macros for compile-time format checking.
#if GCC_VERSION_OR_NEWER(4, 4, 0)
#define PRINTF_CHECK(format_index, varargs_index) \
  __attribute__((format(gnu_printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif

// Check number width.
inline bool is_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

inline bool is_uintn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return !(x >> n);
}

inline uint32_t truncate_to_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1));
}

#define INT_1_TO_63_LIST(V) \
V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N) \
inline bool is_int##N(int64_t x) { return is_intn(N, x); }
#define DECLARE_IS_UINT_N(N) \
inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
#define DECLARE_TRUNCATE_TO_INT_N(N) \
inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N

// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  return (x << (31 - msb)) >> (lsb + 31 - msb);
}

inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
  return (x << (63 - msb)) >> (lsb + 63 - msb);
}

// Floating point representation.
uint32_t float_to_rawbits(float value);
uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);

uint32_t float_sign(float val);
uint32_t float_exp(float val);
uint32_t float_mantissa(float val);
uint32_t double_sign(double val);
uint32_t double_exp(double val);
uint64_t double_mantissa(double val);

float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);

// An fpclassify() function for 16-bit half-precision floats.
int float16classify(float16 value);

// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = double_to_rawbits(num);
  if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = float_to_rawbits(num);
  if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  return (float16classify(num) == FP_NAN) &&
         ((num & kFP16QuietNaNMask) == 0);
}
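
All three IsSignallingNaN() overloads test the same IEEE 754 convention: among NaNs (maximum exponent, nonzero mantissa), the most significant mantissa bit is the quiet bit, so a clear bit means signalling. That is where the masks 0x0008000000000000, 0x00400000 and 0x0200 come from. The binary32 case as a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* A binary32 NaN has all exponent bits set and a nonzero mantissa;
     * mantissa bit 22 (0x00400000) is the quiet bit. */
    static bool is_signalling_nan32(uint32_t bits)
    {
        bool is_nan = ((bits & 0x7f800000u) == 0x7f800000u) &&
                      ((bits & 0x007fffffu) != 0);
        return is_nan && ((bits & 0x00400000u) == 0);
    }
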

template <typename T>
inline bool IsQuietNaN(T num) {
  return std::isnan(num) && !IsSignallingNaN(num);
}


// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
}


// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return fma(op1, op2, a);
}


inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return fmaf(op1, op2, a);
}


inline uint64_t LowestSetBit(uint64_t value) {
  return value & -value;
}


template<typename T>
inline int HighestSetBitPosition(T value) {
  VIXL_ASSERT(value != 0);
  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
}


template<typename V>
inline int WhichPowerOf2(V value) {
  VIXL_ASSERT(IsPowerOf2(value));
  return CountTrailingZeros(value);
}


unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);


template <typename T>
T ReverseBits(T value) {
  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
              (sizeof(value) == 4) || (sizeof(value) == 8));
  T result = 0;
  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
    result = (result << 1) | (value & 1);
    value >>= 1;
  }
  return result;
}


template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
  // Split the 64-bit value into an 8-bit array, where b[0] is the least
  // significant byte, and b[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = UINT64_C(0xff00000000000000);
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[0] is used by REV16_x, REV16_w
  //  permute_table[1] is used by REV32_x, REV_w
  //  permute_table[2] is used by REV_x
  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
                                               {4, 5, 6, 7, 0, 1, 2, 3},
                                               {0, 1, 2, 3, 4, 5, 6, 7} };
  T result = 0;
  for (int i = 0; i < 8; i++) {
    result <<= 8;
    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
  }
  return result;
}
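
The permutation tables encode the A64 REV16/REV32/REV byte shuffles: row 0 swaps the two bytes inside each halfword, row 1 reverses the bytes inside each word, row 2 reverses the whole doubleword. On 32 bits, the REV16 case that row {6, 7, 4, 5, 2, 3, 0, 1} generalizes can be written directly as a mask-and-shift:

    #include <stdint.h>

    /* Swap the two bytes inside each 16-bit halfword (A64 REV16). */
    static uint32_t rev16_32(uint32_t x)
    {
        return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
    }
    /* rev16_32(0x11223344) == 0x22114433 */
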

// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
bool IsWordAligned(T pointer) {
  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
  return ((intptr_t)(pointer) & 3) == 0;
}

// Increment a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = (alignment - pointer_raw) % alignment;
  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);

  return (T)(pointer_raw + align_step);
}

// Decrement a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = pointer_raw % alignment;
  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);

  return (T)(pointer_raw - align_step);
}

}  // namespace vixl

#endif  // VIXL_UTILS_H
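
AlignUp() computes its step as (alignment - pointer) % alignment, so an already-aligned pointer moves by zero rather than by a whole alignment unit. The same arithmetic on plain integers:

    #include <assert.h>
    #include <stdint.h>

    /* Round x up to the next multiple of a (any a > 0). */
    static uint64_t align_up(uint64_t x, uint64_t a)
    {
        return x + (a - x % a) % a;
    }

    int main(void)
    {
        assert(align_up(0, 16) == 0);
        assert(align_up(1, 16) == 16);
        assert(align_up(16, 16) == 16);
        assert(align_up(17, 16) == 32);
        return 0;
    }
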
@@ -1,9 +1,4 @@
libvixl_ss = ss.source_set()
subdir('libvixl')

common_ss.add(when: 'CONFIG_ALPHA_DIS', if_true: files('alpha.c'))
common_ss.add(when: 'CONFIG_ARM_A64_DIS', if_true: files('arm-a64.cc'))
common_ss.add_all(when: 'CONFIG_ARM_A64_DIS', if_true: libvixl_ss)
common_ss.add(when: 'CONFIG_CRIS_DIS', if_true: files('cris.c'))
common_ss.add(when: 'CONFIG_HEXAGON_DIS', if_true: files('hexagon.c'))
common_ss.add(when: 'CONFIG_HPPA_DIS', if_true: files('hppa.c'))
@@ -66,8 +66,6 @@
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2

#pragma GCC poison CONFIG_ALPHA_DIS
#pragma GCC poison CONFIG_ARM_A64_DIS
#pragma GCC poison CONFIG_ARM_DIS
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
@@ -376,12 +376,7 @@ static inline uint64_t uabs64(int64_t v)
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return ((*ret ^ x) & ~(x ^ y)) < 0;
#endif
}
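
The pre-GCC-5 fallback being dropped here (the hunks shrink from 12 lines to 7) detects signed overflow from sign bits alone: overflow happens exactly when x and y agree in sign but the wrapped sum does not. ~(x ^ y) has its sign bit set when the operands agree, (*ret ^ x) has it set when the result disagrees, so their AND is negative precisely on overflow. A standalone check of the identity, with the wrap made explicit through unsigned arithmetic:

    #include <assert.h>
    #include <stdint.h>

    static int sadd32_overflows(int32_t x, int32_t y)
    {
        /* Wrapped sum, computed without signed-overflow UB. */
        int32_t s = (int32_t)((uint32_t)x + (uint32_t)y);
        return ((s ^ x) & ~(x ^ y)) < 0;
    }

    int main(void)
    {
        assert(sadd32_overflows(INT32_MAX, 1));
        assert(!sadd32_overflows(INT32_MAX, -1));
        assert(sadd32_overflows(INT32_MIN, -1));
        return 0;
    }
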

/**
@@ -394,12 +389,7 @@ static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return ((*ret ^ x) & ~(x ^ y)) < 0;
#endif
}

/**
@@ -412,12 +402,7 @@ static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return *ret < x;
#endif
}

/**
@@ -430,12 +415,7 @@ static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return *ret < x;
#endif
}

/**
@@ -449,12 +429,7 @@ static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return ((*ret ^ x) & (x ^ y)) < 0;
#endif
}

/**
@@ -468,12 +443,7 @@ static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return ((*ret ^ x) & (x ^ y)) < 0;
#endif
}

/**
@@ -487,12 +457,7 @@ static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return x < y;
#endif
}

/**
@@ -506,12 +471,7 @@ static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return x < y;
#endif
}

/**
@@ -524,13 +484,7 @@ static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    int64_t z = (int64_t)x * y;
    *ret = z;
    return *ret != z;
#endif
}

/**
@@ -543,14 +497,7 @@ static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    uint64_t hi, lo;
    muls64(&lo, &hi, x, y);
    *ret = lo;
    return hi != ((int64_t)lo >> 63);
#endif
}

/**
@@ -563,13 +510,7 @@ static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    uint64_t z = (uint64_t)x * y;
    *ret = z;
    return z > UINT32_MAX;
#endif
}

/**
@@ -582,13 +523,7 @@ static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    uint64_t hi;
    mulu64(ret, &hi, x, y);
    return hi != 0;
#endif
}
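
With the fallbacks gone, every wrapper reduces to one of the type-generic overflow builtins available since GCC 5 and in Clang; the truncated result is stored through the pointer even when overflow is reported. Usage sketch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t r;

        /* 2^32 * 2^32 = 2^64 overflows uint64_t; r receives the
         * truncated product (0 here). */
        if (__builtin_mul_overflow(UINT64_C(1) << 32, UINT64_C(1) << 32, &r)) {
            printf("overflow, truncated result = %" PRIu64 "\n", r);
        }
        return 0;
    }
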

/*
@@ -250,7 +250,6 @@ endif
add_project_arguments('-iquote', '.',
                      '-iquote', meson.current_source_dir(),
                      '-iquote', meson.current_source_dir() / 'include',
                      '-iquote', meson.current_source_dir() / 'disas/libvixl',
                      language: ['c', 'cpp', 'objc'])

link_language = meson.get_external_property('link_language', 'cpp')
@@ -1210,7 +1209,7 @@ if gtkx11.found()
endif
png = not_found
if get_option('png').allowed() and have_system
  png = dependency('libpng', required: get_option('png'),
  png = dependency('libpng', version: '>=1.6.34', required: get_option('png'),
                   method: 'pkg-config', kwargs: static_kwargs)
endif
vnc = not_found
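
The version constraint is resolved at build time through pkg-config. For what it's worth, libpng can also report its version at run time; a minimal sketch (assuming only the long-standing png_access_version_number() API):

    #include <png.h>
    #include <stdio.h>

    int main(void)
    {
        /* PNG_LIBPNG_VER is e.g. 10634 for 1.6.34; the function returns
         * the version of the library linked at run time. */
        printf("compiled %d, running %lu\n",
               PNG_LIBPNG_VER, (unsigned long)png_access_version_number());
        return 0;
    }
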
@@ -2372,7 +2371,6 @@ config_target_mak = {}

disassemblers = {
  'alpha' : ['CONFIG_ALPHA_DIS'],
  'arm' : ['CONFIG_ARM_DIS'],
  'avr' : ['CONFIG_AVR_DIS'],
  'cris' : ['CONFIG_CRIS_DIS'],
  'hexagon' : ['CONFIG_HEXAGON_DIS'],
@@ -2395,8 +2393,6 @@ disassemblers = {
}
if link_language == 'cpp'
  disassemblers += {
    'aarch64' : [ 'CONFIG_ARM_A64_DIS'],
    'arm' : [ 'CONFIG_ARM_DIS', 'CONFIG_ARM_A64_DIS'],
    'mips' : [ 'CONFIG_MIPS_DIS', 'CONFIG_NANOMIPS_DIS'],
  }
endif
@@ -5,4 +5,5 @@ hu
it
sv
tr
uk
zh_CN
po/uk.po (new file, 75 lines)
@@ -0,0 +1,75 @@
# Ukrainian translation for QEMU.
# This file is put in the public domain.
# Andrij Mizyk <andmizyk@gmail.com>, 2022.
#
msgid ""
msgstr ""
"Project-Id-Version: QEMU 1.4.50\n"
"Report-Msgid-Bugs-To: qemu-devel@nongnu.org\n"
"POT-Creation-Date: 2018-07-18 07:56+0200\n"
"PO-Revision-Date: 2022-06-13 01:33+0300\n"
"Last-Translator: Andrij Mizyk <andmizyk@gmail.com>\n"
"Language-Team: Ukrainian\n"
"Language: uk\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"X-Generator: Gtranslator 2.91.6\n"

msgid " - Press Ctrl+Alt+G to release grab"
msgstr " - Натисніть Ctrl+Alt+G, щоб відпустити захоплення"

msgid " [Paused]"
msgstr " [Призупинено]"

msgid "_Pause"
msgstr "_Призупинити"

msgid "_Reset"
msgstr "_Скинути"

msgid "Power _Down"
msgstr "Вимкнути _живлення"

msgid "_Quit"
msgstr "_Вийти"

msgid "_Fullscreen"
msgstr "Повний _екран"

msgid "_Copy"
msgstr "_Копіювати"

msgid "Zoom _In"
msgstr "_Збільшити"

msgid "Zoom _Out"
msgstr "З_меншити"

msgid "Best _Fit"
msgstr "Найкращий _розмір"

msgid "Zoom To _Fit"
msgstr "Збільшити до _розміру"

msgid "Grab On _Hover"
msgstr "Захопити при _наведенні"

msgid "_Grab Input"
msgstr "Захопити _введення"

msgid "Show _Tabs"
msgstr "Показувати _вкладки"

msgid "Detach Tab"
msgstr "Відʼєднати вкладку"

msgid "Show Menubar"
msgstr "Показувати рядок меню"

msgid "_Machine"
msgstr "_Машина"

msgid "_View"
msgstr "_Вигляд"
@@ -32,8 +32,8 @@ use warnings;
use Getopt::Std;

# Stuff we don't want to clean because we import it into our tree:
my $exclude = qr,^(disas/libvixl/|include/standard-headers/
                   |linux-headers/|pc-bios/|tests/tcg/|tests/multiboot/),x;
my $exclude = qr,^(include/standard-headers/|linux-headers/
                   |pc-bios/|tests/tcg/|tests/multiboot/),x;
# Stuff that is expected to fail the preprocessing test:
my $exclude_cpp = qr,^include/libdecnumber/decNumberLocal.h,;

@@ -51,7 +51,7 @@ GIT=no
DUPHEAD=no

# Extended regular expression defining files to ignore when using --all
XDIRREGEX='^(tests/tcg|tests/multiboot|pc-bios|disas/libvixl)'
XDIRREGEX='^(tests/tcg|tests/multiboot|pc-bios)'

while true
do
@@ -87,9 +87,6 @@ io
ipmi
  ~ (/qemu)?((/include)?/hw/ipmi/.*)

libvixl
  ~ (/qemu)?(/disas/libvixl/.*)

migration
  ~ (/qemu)?((/include)?/migration/.*)

@@ -828,13 +828,6 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
@@ -1,3 +1,6 @@
if 'CONFIG_TCG' not in config_all
  subdir_done()
endif
# There are namespace pollution issues on Windows, due to osdep.h
# bringing in Windows headers that define a FLOAT128 type.
if targetos == 'windows'
@@ -15,6 +15,14 @@

#include "migration-helpers.h"

/*
 * Number of seconds we wait when looking for migration
 * status changes, to avoid test suite hanging forever
 * when things go wrong. Needs to be high enough to
 * avoid false positives on loaded hosts.
 */
#define MIGRATION_STATUS_WAIT_TIMEOUT 120

bool got_stop;

static void check_stop_event(QTestState *who)
@@ -166,8 +174,11 @@ static bool check_migration_status(QTestState *who, const char *goal,
void wait_for_migration_status(QTestState *who,
                               const char *goal, const char **ungoals)
{
    g_test_timer_start();
    while (!check_migration_status(who, goal, ungoals)) {
        usleep(1000);

        g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT);
    }
}

@@ -178,6 +189,7 @@ void wait_for_migration_complete(QTestState *who)

void wait_for_migration_fail(QTestState *from, bool allow_active)
{
    g_test_timer_start();
    QDict *rsp_return;
    char *status;
    bool failed;
@@ -193,6 +205,8 @@ void wait_for_migration_fail(QTestState *from, bool allow_active)
    g_assert(result);
    failed = !strcmp(status, "failed");
    g_free(status);

    g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT);
    } while (!failed);

/* Is the machine currently running? */
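
The bounded waits rely on GLib's per-test timer: g_test_timer_start() resets it and g_test_timer_elapsed() returns the seconds since the last start, so a status poll that exceeds the budget now fails the test instead of hanging the CI job. The shape of the pattern, extracted as a sketch:

    #include <glib.h>
    #include <unistd.h>

    #define WAIT_TIMEOUT 120 /* seconds */

    /* Poll cond() at 1ms intervals, aborting the test instead of
     * hanging forever if it never becomes true. */
    static void wait_for(gboolean (*cond)(void))
    {
        g_test_timer_start();
        while (!cond()) {
            usleep(1000);
            g_assert(g_test_timer_elapsed() < WAIT_TIMEOUT);
        }
    }
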
@@ -46,9 +46,6 @@ unsigned start_address;
unsigned end_address;
static bool uffd_feature_thread_id;

/* A downtime where the test really should converge */
#define CONVERGE_DOWNTIME 1000

#if defined(__linux__)
#include <sys/syscall.h>
#include <sys/vfs.h>
@@ -402,6 +399,20 @@ static void migrate_set_parameter_str(QTestState *who, const char *parameter,
    migrate_check_parameter_str(who, parameter, value);
}

static void migrate_ensure_non_converge(QTestState *who)
{
    /* Can't converge with 1ms downtime + 30 mbs bandwidth limit */
    migrate_set_parameter_int(who, "max-bandwidth", 30 * 1000 * 1000);
    migrate_set_parameter_int(who, "downtime-limit", 1);
}

static void migrate_ensure_converge(QTestState *who)
{
    /* Should converge with 30s downtime + 1 gbs bandwidth limit */
    migrate_set_parameter_int(who, "max-bandwidth", 1 * 1000 * 1000 * 1000);
    migrate_set_parameter_int(who, "downtime-limit", 30 * 1000);
}
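
The two helpers work because migration can only complete when the outstanding dirty data fits into max-bandwidth x downtime-limit. Rough numbers behind the chosen limits, as a worked sketch:

    #include <stdio.h>

    /* Completion requires: remaining dirty data <= bandwidth * downtime. */
    int main(void)
    {
        double non_converge = 30e6 * 0.001; /* 30 MB/s * 1 ms = ~30 KB  */
        double converge     = 1e9  * 30.0;  /*  1 GB/s * 30 s = ~30 GB  */

        printf("non-converge budget: %.0f bytes\n", non_converge);
        printf("converge budget:     %.0f bytes\n", converge);
        return 0;
    }

The first budget is far below what a guest touching memory keeps dirty, the second far above anything a test guest holds, so the outcomes are deterministic regardless of host load.
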

static void migrate_pause(QTestState *who)
{
    QDict *rsp;
@@ -984,12 +995,7 @@ static int migrate_postcopy_prepare(QTestState **from_ptr,
    migrate_set_capability(to, "postcopy-ram", true);
    migrate_set_capability(to, "postcopy-blocktime", true);

    /* We want to pick a speed slow enough that the test completes
     * quickly, but that it doesn't complete precopy even on a slow
     * machine, so also set the downtime.
     */
    migrate_set_parameter_int(from, "max-bandwidth", 30000000);
    migrate_set_parameter_int(from, "downtime-limit", 1);
    migrate_ensure_non_converge(from);

    /* Wait for the first serial output from the source */
    wait_for_serial("src_serial");
@@ -1188,15 +1194,7 @@ static void test_precopy_common(MigrateCommon *args)
        return;
    }

    /*
     * We want to pick a speed slow enough that the test completes
     * quickly, but that it doesn't complete precopy even on a slow
     * machine, so also set the downtime.
     */
    /* 1 ms should make it not converge*/
    migrate_set_parameter_int(from, "downtime-limit", 1);
    /* 1GB/s */
    migrate_set_parameter_int(from, "max-bandwidth", 1000000000);
    migrate_ensure_non_converge(from);

    if (args->start_hook) {
        data_hook = args->start_hook(from, to);
@@ -1230,7 +1228,11 @@ static void test_precopy_common(MigrateCommon *args)
        wait_for_migration_pass(from);
    }

    migrate_set_parameter_int(from, "downtime-limit", CONVERGE_DOWNTIME);
    migrate_ensure_converge(from);

    /* We do this first, as it has a timeout to stop us
     * hanging forever if migration didn't converge */
    wait_for_migration_complete(from);

    if (!got_stop) {
        qtest_qmp_eventwait(from, "STOP");
@@ -1239,7 +1241,6 @@ static void test_precopy_common(MigrateCommon *args)
        qtest_qmp_eventwait(to, "RESUME");

        wait_for_serial("dest_serial");
        wait_for_migration_complete(from);
    }

    if (args->finish_hook) {
@@ -1691,8 +1692,7 @@ static void test_migrate_auto_converge(void)
     * Set the initial parameters so that the migration could not converge
     * without throttling.
     */
    migrate_set_parameter_int(from, "downtime-limit", 1);
    migrate_set_parameter_int(from, "max-bandwidth", 100000000); /* ~100Mb/s */
    migrate_ensure_non_converge(from);

    /* To check remaining size after precopy */
    migrate_set_capability(from, "pause-before-switchover", true);
@@ -1997,15 +1997,7 @@ static void test_multifd_tcp_cancel(void)
        return;
    }

    /*
     * We want to pick a speed slow enough that the test completes
     * quickly, but that it doesn't complete precopy even on a slow
     * machine, so also set the downtime.
     */
    /* 1 ms should make it not converge*/
    migrate_set_parameter_int(from, "downtime-limit", 1);
    /* 300MB/s */
    migrate_set_parameter_int(from, "max-bandwidth", 30000000);
    migrate_ensure_non_converge(from);

    migrate_set_parameter_int(from, "multifd-channels", 16);
    migrate_set_parameter_int(to, "multifd-channels", 16);
@@ -2051,10 +2043,7 @@ static void test_multifd_tcp_cancel(void)

    wait_for_migration_status(from, "cancelled", NULL);

    /* 300ms it should converge */
    migrate_set_parameter_int(from, "downtime-limit", 300);
    /* 1GB/s */
    migrate_set_parameter_int(from, "max-bandwidth", 1000000000);
    migrate_ensure_converge(from);

    migrate_qmp(from, uri, "{}");

@@ -2452,18 +2452,44 @@ static void test_qemu_strtosz_metric(void)

static void test_freq_to_str(void)
{
    g_assert_cmpstr(freq_to_str(999), ==, "999 Hz");
    g_assert_cmpstr(freq_to_str(1000), ==, "1 KHz");
    g_assert_cmpstr(freq_to_str(1010), ==, "1.01 KHz");
    char *str;

    str = freq_to_str(999);
    g_assert_cmpstr(str, ==, "999 Hz");
    g_free(str);

    str = freq_to_str(1000);
    g_assert_cmpstr(str, ==, "1 KHz");
    g_free(str);

    str = freq_to_str(1010);
    g_assert_cmpstr(str, ==, "1.01 KHz");
    g_free(str);
}

static void test_size_to_str(void)
{
    g_assert_cmpstr(size_to_str(0), ==, "0 B");
    g_assert_cmpstr(size_to_str(1), ==, "1 B");
    g_assert_cmpstr(size_to_str(1016), ==, "0.992 KiB");
    g_assert_cmpstr(size_to_str(1024), ==, "1 KiB");
    g_assert_cmpstr(size_to_str(512ull << 20), ==, "512 MiB");
    char *str;

    str = size_to_str(0);
    g_assert_cmpstr(str, ==, "0 B");
    g_free(str);

    str = size_to_str(1);
    g_assert_cmpstr(str, ==, "1 B");
    g_free(str);

    str = size_to_str(1016);
    g_assert_cmpstr(str, ==, "0.992 KiB");
    g_free(str);

    str = size_to_str(1024);
    g_assert_cmpstr(str, ==, "1 KiB");
    g_free(str);

    str = size_to_str(512ull << 20);
    g_assert_cmpstr(str, ==, "512 MiB");
    g_free(str);
}

static void test_iec_binary_prefix(void)
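
The leak fix stores each returned string so it can be passed to g_free(); the old form discarded the pointer after the assertion. An equivalent shape in GLib code, shown only as an alternative sketch (not what this commit does), is g_autofree, which frees the string when it leaves scope:

    #include <glib.h>
    #include "qemu/cutils.h" /* declares freq_to_str() */

    static void test_freq_to_str_autofree(void)
    {
        g_autofree char *str = freq_to_str(999);

        g_assert_cmpstr(str, ==, "999 Hz");
        /* str is freed automatically here (GCC/Clang cleanup attribute). */
    }
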