Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20210119-1' into staging

target-arm queue:
 * Implement IMPDEF pauth algorithm
 * Support ARMv8.4-SEL2
 * Fix bug where we were truncating predicate vector lengths in SVE insns
 * npcm7xx_adc-test: Fix memleak in adc_qom_set
 * target/arm/m_helper: Silence GCC 10 maybe-uninitialized error
 * docs: Build and install all the docs in a single manual

# gpg: Signature made Tue 19 Jan 2021 15:46:34 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20210119-1: (29 commits)
  docs: Build and install all the docs in a single manual
  target/arm/m_helper: Silence GCC 10 maybe-uninitialized error
  npcm7xx_adc-test: Fix memleak in adc_qom_set
  target/arm: Update REV, PUNPK for pred_desc
  target/arm: Update ZIP, UZP, TRN for pred_desc
  target/arm: Update PFIRST, PNEXT for pred_desc
  target/arm: Introduce PREDDESC field definitions
  target/arm: refactor vae1_tlbmask()
  target/arm: enable Secure EL2 in max CPU
  target/arm: Implement SCR_EL2.EEL2
  target/arm: revector to run-time pick target EL
  target/arm: set HPFAR_EL2.NS on secure stage 2 faults
  target/arm: secure stage 2 translation regime
  target/arm: generalize 2-stage page-walk condition
  target/arm: translate NS bit in page-walks
  target/arm: do S1_ptw_translate() before address space lookup
  target/arm: handle VMID change in secure state
  target/arm: add ARMv8.4-SEL2 system registers
  target/arm: add MMU stage 1 for Secure EL2
  target/arm: add 64-bit S-EL2 to EL exception table
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 48202c7124 (Peter Maydell, 2021-01-19 15:47:23 +00:00)
30 changed files with 770 additions and 438 deletions


@ -622,8 +622,8 @@ pages:
-t "Welcome to the QEMU sourcecode"
- mv HTML public/src
# Project documentation
- mv build/docs/index.html public/
- for i in devel interop specs system tools user ; do mv build/docs/$i public/ ; done
- make -C build install DESTDIR=$(pwd)/temp-install
- mv temp-install/usr/local/share/doc/qemu/* public/
artifacts:
paths:
- public


@ -224,7 +224,51 @@ latex_documents = [
# -- Options for manual page output ---------------------------------------
# Individual manual/conf.py can override this to create man pages
man_pages = []
man_pages = [
('interop/qemu-ga', 'qemu-ga',
'QEMU Guest Agent',
['Michael Roth <mdroth@linux.vnet.ibm.com>'], 8),
('interop/qemu-ga-ref', 'qemu-ga-ref',
'QEMU Guest Agent Protocol Reference',
[], 7),
('interop/qemu-qmp-ref', 'qemu-qmp-ref',
'QEMU QMP Reference Manual',
[], 7),
('interop/qemu-storage-daemon-qmp-ref', 'qemu-storage-daemon-qmp-ref',
'QEMU Storage Daemon QMP Reference Manual',
[], 7),
('system/qemu-manpage', 'qemu',
'QEMU User Documentation',
['Fabrice Bellard'], 1),
('system/qemu-block-drivers', 'qemu-block-drivers',
'QEMU block drivers reference',
['Fabrice Bellard and the QEMU Project developers'], 7),
('system/qemu-cpu-models', 'qemu-cpu-models',
'QEMU CPU Models',
['The QEMU Project developers'], 7),
('tools/qemu-img', 'qemu-img',
'QEMU disk image utility',
['Fabrice Bellard'], 1),
('tools/qemu-nbd', 'qemu-nbd',
'QEMU Disk Network Block Device Server',
['Anthony Liguori <anthony@codemonkey.ws>'], 8),
('tools/qemu-pr-helper', 'qemu-pr-helper',
'QEMU persistent reservation helper',
[], 8),
('tools/qemu-storage-daemon', 'qemu-storage-daemon',
'QEMU storage daemon',
[], 1),
('tools/qemu-trace-stap', 'qemu-trace-stap',
'QEMU SystemTap trace tool',
[], 1),
('tools/virtfs-proxy-helper', 'virtfs-proxy-helper',
'QEMU 9p virtfs proxy filesystem helper',
['M. Mohan Kumar'], 1),
('tools/virtiofsd', 'virtiofsd',
'QEMU virtio-fs shared file system daemon',
['Stefan Hajnoczi <stefanha@redhat.com>',
'Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>'], 1),
]
# -- Options for Texinfo output -------------------------------------------


@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'devel' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = u'Developer''s Guide'


@ -1,17 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>QEMU @VERSION@ Documentation</title>
</head>
<body>
<h1>QEMU @VERSION@ Documentation</h1>
<ul>
<li><a href="system/index.html">System Emulation User's Guide</a></li>
<li><a href="user/index.html">User Mode Emulation User's Guide</a></li>
<li><a href="tools/index.html">Tools Guide</a></li>
<li><a href="interop/index.html">System Emulation Management and Interoperability Guide</a></li>
<li><a href="specs/index.html">System Emulation Guest Hardware Specifications</a></li>
</ul>
</body>
</html>


@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'interop' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = u'System Emulation Management and Interoperability Guide'
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('qemu-ga', 'qemu-ga', u'QEMU Guest Agent',
['Michael Roth <mdroth@linux.vnet.ibm.com>'], 8),
('qemu-ga-ref', 'qemu-ga-ref', 'QEMU Guest Agent Protocol Reference',
[], 7),
('qemu-qmp-ref', 'qemu-qmp-ref', 'QEMU QMP Reference Manual',
[], 7),
('qemu-storage-daemon-qmp-ref', 'qemu-storage-daemon-qmp-ref',
'QEMU Storage Daemon QMP Reference Manual', [], 7),
]


@ -46,19 +46,11 @@ if build_docs
meson.source_root() / 'docs/sphinx/qmp_lexer.py',
qapi_gen_depends ]
configure_file(output: 'index.html',
input: files('index.html.in'),
configuration: {'VERSION': meson.project_version()},
install_dir: qemu_docdir)
manuals = [ 'devel', 'interop', 'tools', 'specs', 'system', 'user' ]
man_pages = {
'interop' : {
'qemu-ga.8': (have_tools ? 'man8' : ''),
'qemu-ga-ref.7': 'man7',
'qemu-qmp-ref.7': 'man7',
'qemu-storage-daemon-qmp-ref.7': (have_tools ? 'man7' : ''),
},
'tools': {
'qemu-img.1': (have_tools ? 'man1' : ''),
'qemu-nbd.8': (have_tools ? 'man8' : ''),
'qemu-pr-helper.8': (have_tools ? 'man8' : ''),
@ -66,53 +58,47 @@ if build_docs
'qemu-trace-stap.1': (config_host.has_key('CONFIG_TRACE_SYSTEMTAP') ? 'man1' : ''),
'virtfs-proxy-helper.1': (have_virtfs_proxy_helper ? 'man1' : ''),
'virtiofsd.1': (have_virtiofsd ? 'man1' : ''),
},
'system': {
'qemu.1': 'man1',
'qemu-block-drivers.7': 'man7',
'qemu-cpu-models.7': 'man7'
},
}
sphinxdocs = []
sphinxmans = []
foreach manual : manuals
private_dir = meson.current_build_dir() / (manual + '.p')
output_dir = meson.current_build_dir() / manual
input_dir = meson.current_source_dir() / manual
this_manual = custom_target(manual + ' manual',
private_dir = meson.current_build_dir() / 'manual.p'
output_dir = meson.current_build_dir() / 'manual'
input_dir = meson.current_source_dir()
this_manual = custom_target('QEMU manual',
build_by_default: build_docs,
output: [manual + '.stamp'],
input: [files('conf.py'), files(manual / 'conf.py')],
depfile: manual + '.d',
output: 'docs.stamp',
input: files('conf.py'),
depfile: 'docs.d',
depend_files: sphinx_extn_depends,
command: [SPHINX_ARGS, '-Ddepfile=@DEPFILE@',
'-Ddepfile_stamp=@OUTPUT0@',
'-b', 'html', '-d', private_dir,
input_dir, output_dir])
sphinxdocs += this_manual
if build_docs and manual != 'devel'
install_subdir(output_dir, install_dir: qemu_docdir)
endif
sphinxdocs += this_manual
install_subdir(output_dir, install_dir: qemu_docdir, strip_directory: true)
these_man_pages = []
install_dirs = []
foreach page, section : man_pages.get(manual, {})
these_man_pages += page
install_dirs += section == '' ? false : get_option('mandir') / section
endforeach
if these_man_pages.length() > 0
sphinxmans += custom_target(manual + ' man pages',
build_by_default: build_docs,
output: these_man_pages,
input: this_manual,
install: build_docs,
install_dir: install_dirs,
command: [SPHINX_ARGS, '-b', 'man', '-d', private_dir,
input_dir, meson.current_build_dir()])
endif
these_man_pages = []
install_dirs = []
foreach page, section : man_pages
these_man_pages += page
install_dirs += section == '' ? false : get_option('mandir') / section
endforeach
sphinxmans += custom_target('QEMU man pages',
build_by_default: build_docs,
output: these_man_pages,
input: this_manual,
install: build_docs,
install_dir: install_dirs,
command: [SPHINX_ARGS, '-b', 'man', '-d', private_dir,
input_dir, meson.current_build_dir()])
alias_target('sphinxdocs', sphinxdocs)
alias_target('html', sphinxdocs)
alias_target('man', sphinxmans)


@ -1,16 +0,0 @@
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'specs' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = \
u'System Emulation Guest Hardware Specifications'


@ -211,6 +211,27 @@ the list of KVM VCPU features and their descriptions.
influence the guest scheduler behavior and/or be
exposed to the guest userspace.
TCG VCPU Features
=================
TCG VCPU features are CPU features that are specific to TCG.
Below is the list of TCG VCPU features and their descriptions.
pauth Enable or disable `FEAT_Pauth`, pointer
authentication. By default, the feature is
enabled with `-cpu max`.
pauth-impdef When `FEAT_Pauth` is enabled, either the
*impdef* (Implementation Defined) algorithm
is enabled or the *architected* QARMA algorithm
is enabled. By default the impdef algorithm
is disabled, and QARMA is enabled.
The architected QARMA algorithm has good
cryptographic properties, but can be quite slow
to emulate. The impdef algorithm used by QEMU
is non-cryptographic but significantly faster.
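As a usage sketch (not part of this commit's diff; the 'virt' board is an assumption), the new properties are set from the command line:

qemu-system-aarch64 -machine virt -cpu max,pauth-impdef=on ...

pauth defaults to on, so pauth-impdef=on is the only switch needed to select the faster algorithm; pauth=off disables pointer authentication entirely.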
SVE CPU Properties
==================


@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'system' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = u'System Emulation User''s Guide'
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('qemu-manpage', 'qemu', u'QEMU User Documentation',
['Fabrice Bellard'], 1),
('qemu-block-drivers', 'qemu-block-drivers',
u'QEMU block drivers reference',
['Fabrice Bellard and the QEMU Project developers'], 7),
('qemu-cpu-models', 'qemu-cpu-models',
u'QEMU CPU Models',
['The QEMU Project developers'], 7)
]


@ -1,37 +0,0 @@
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'tools' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = \
u'Tools Guide'
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('qemu-img', 'qemu-img', u'QEMU disk image utility',
['Fabrice Bellard'], 1),
('qemu-storage-daemon', 'qemu-storage-daemon', u'QEMU storage daemon',
[], 1),
('qemu-nbd', 'qemu-nbd', u'QEMU Disk Network Block Device Server',
['Anthony Liguori <anthony@codemonkey.ws>'], 8),
('qemu-pr-helper', 'qemu-pr-helper', 'QEMU persistent reservation helper',
[], 8),
('qemu-trace-stap', 'qemu-trace-stap', u'QEMU SystemTap trace tool',
[], 1),
('virtfs-proxy-helper', 'virtfs-proxy-helper',
u'QEMU 9p virtfs proxy filesystem helper',
['M. Mohan Kumar'], 1),
('virtiofsd', 'virtiofsd', u'QEMU virtio-fs shared file system daemon',
['Stefan Hajnoczi <stefanha@redhat.com>',
'Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>'], 1),
]


@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file for the 'user' manual.
#
# This includes the top level conf file and then makes any necessary tweaks.
import sys
import os
qemu_docdir = os.path.abspath("..")
parent_config = os.path.join(qemu_docdir, "conf.py")
exec(compile(open(parent_config, "rb").read(), parent_config, 'exec'))
# This slightly misuses the 'description', but is the best way to get
# the manual title to appear in the sidebar.
html_theme_options['description'] = u'User Mode Emulation User''s Guide'


@ -119,4 +119,102 @@ static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
return qemu_xxhash7(ab, cd, e, f, 0);
}
/*
* Component parts of the XXH64 algorithm from
* https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h
*
* The complete algorithm looks like
*
* i = 0;
* if (len >= 32) {
* v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
* v2 = seed + XXH_PRIME64_2;
* v3 = seed + 0;
* v4 = seed - XXH_PRIME64_1;
* do {
* v1 = XXH64_round(v1, get64bits(input + i));
* v2 = XXH64_round(v2, get64bits(input + i + 8));
* v3 = XXH64_round(v3, get64bits(input + i + 16));
* v4 = XXH64_round(v4, get64bits(input + i + 24));
* } while ((i += 32) <= len);
* h64 = XXH64_mergerounds(v1, v2, v3, v4);
* } else {
* h64 = seed + XXH_PRIME64_5;
* }
* h64 += len;
*
* for (; i + 8 <= len; i += 8) {
* h64 ^= XXH64_round(0, get64bits(input + i));
* h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
* }
* for (; i + 4 <= len; i += 4) {
h64 ^= get32bits(input + i) * XXH_PRIME64_1;
* h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
* }
* for (; i < len; i += 1) {
* h64 ^= get8bits(input + i) * XXH_PRIME64_5;
* h64 = rol64(h64, 11) * XXH_PRIME64_1;
* }
*
* return XXH64_avalanche(h64)
*
* Exposing the pieces instead allows for simplified usage when
* the length is a known constant and the inputs are in registers.
*/
#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
#define XXH_PRIME64_3 0x165667B19E3779F9ULL
#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL
static inline uint64_t XXH64_round(uint64_t acc, uint64_t input)
{
return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1;
}
static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val)
{
return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4;
}
static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2,
uint64_t v3, uint64_t v4)
{
uint64_t h64;
h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
h64 = XXH64_mergeround(h64, v1);
h64 = XXH64_mergeround(h64, v2);
h64 = XXH64_mergeround(h64, v3);
h64 = XXH64_mergeround(h64, v4);
return h64;
}
static inline uint64_t XXH64_avalanche(uint64_t h64)
{
h64 ^= h64 >> 33;
h64 *= XXH_PRIME64_2;
h64 ^= h64 >> 29;
h64 *= XXH_PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b,
uint64_t c, uint64_t d)
{
uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
uint64_t v3 = QEMU_XXHASH_SEED + 0;
uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
v1 = XXH64_round(v1, a);
v2 = XXH64_round(v2, b);
v3 = XXH64_round(v3, c);
v4 = XXH64_round(v4, d);
return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4));
}
#endif /* QEMU_XXHASH_H */
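To illustrate why the component parts are exposed (a hypothetical sketch, not code from this commit): a caller with a known-constant 16-byte input can follow the len < 32 path of the comment above directly, with no loops or length dispatch:

/*
 * Hypothetical example: XXH64 of exactly two 64-bit words, following
 * the len < 32 path above: h64 starts as seed + XXH_PRIME64_5, has
 * len (16) added, then takes two 8-byte rounds and the avalanche.
 */
static inline uint64_t xxh64_two_words(uint64_t a, uint64_t b)
{
    uint64_t h64 = QEMU_XXHASH_SEED + XXH_PRIME64_5 + 16;

    h64 ^= XXH64_round(0, a);
    h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
    h64 ^= XXH64_round(0, b);
    h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;

    return XXH64_avalanche(h64);
}

This is the same pattern qemu_xxhash64_4() uses for the merge-rounds path; QEMU_XXHASH_SEED and rol64() come from the surrounding QEMU headers.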


@ -29,6 +29,6 @@
# define TARGET_PAGE_BITS_MIN 10
#endif
#define NB_MMU_MODES 11
#define NB_MMU_MODES 15
#endif


@ -451,14 +451,14 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervized and non-secure. */
if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervized. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervized and non-secure. */
if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervized. */
return false;
}
return !(env->daif & PSTATE_I);
@ -480,7 +480,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* masked from Secure state. The HCR and SCR settings
* don't affect the masking logic, only the interrupt routing.
*/
if (target_el == 3 || !secure) {
if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
unmasked = true;
}
} else {
@ -1320,6 +1320,19 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
error_propagate(errp, local_err);
return;
}
/*
* KVM does not support modifications to this feature.
* We have not registered the cpu properties when KVM
* is in use, so the user will not be able to set them.
*/
if (!kvm_enabled()) {
arm_cpu_pauth_finalize(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
}
}
if (kvm_enabled()) {


@ -168,6 +168,11 @@ typedef struct {
uint32_t base_mask;
} TCR;
#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA
/* Define a maximum sized vector register.
* For 32-bit, this is a 128-bit NEON/AdvSIMD register.
* For 64-bit, this is a 2048-bit SVE register.
@ -197,9 +202,11 @@ typedef struct {
#ifdef TARGET_AARCH64
# define ARM_MAX_VQ 16
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
#else
# define ARM_MAX_VQ 1
static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { }
#endif
typedef struct ARMVectorReg {
@ -323,9 +330,11 @@ typedef struct CPUARMState {
uint64_t ttbr1_el[4];
};
uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
/* MMU translation table base control. */
TCR tcr_el[4];
TCR vtcr_el2; /* Virtualization Translation Control. */
TCR vstcr_el2; /* Secure Virtualization Translation Control. */
uint32_t c2_data; /* MPU data cacheable bits. */
uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
@ -947,6 +956,14 @@ struct ARMCPU {
uint64_t reset_cbar;
uint32_t reset_auxcr;
bool reset_hivecs;
/*
* Intermediate values used during property parsing.
* Once finalized, the values should be read from ID_AA64ISAR1.
*/
bool prop_pauth;
bool prop_pauth_impdef;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
uint32_t dcz_blocksize;
uint64_t rvbar;
@ -1467,6 +1484,8 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_TWEDEN (1ULL << 59)
#define HCR_TWEDEL MAKE_64BIT_MASK(60, 4)
#define HPFAR_NS (1ULL << 63)
#define SCR_NS (1U << 0)
#define SCR_IRQ (1U << 1)
#define SCR_FIQ (1U << 2)
@ -2138,6 +2157,21 @@ static inline bool arm_is_secure(CPUARMState *env)
return arm_is_secure_below_el3(env);
}
/*
* Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
* This corresponds to the pseudocode EL2Enabled()
*/
static inline bool arm_is_el2_enabled(CPUARMState *env)
{
if (arm_feature(env, ARM_FEATURE_EL2)) {
if (arm_is_secure_below_el3(env)) {
return (env->cp15.scr_el3 & SCR_EEL2) != 0;
}
return true;
}
return false;
}
#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
@ -2148,6 +2182,11 @@ static inline bool arm_is_secure(CPUARMState *env)
{
return false;
}
static inline bool arm_is_el2_enabled(CPUARMState *env)
{
return false;
}
#endif
/**
@ -2175,7 +2214,8 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
return aa64;
}
if (arm_feature(env, ARM_FEATURE_EL3)) {
if (arm_feature(env, ARM_FEATURE_EL3) &&
((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
}
@ -2183,7 +2223,7 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
return aa64;
}
if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
if (arm_is_el2_enabled(env)) {
aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
}
@ -3022,6 +3062,9 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */
/* Meanings of the bits for A profile mmu idx values */
#define ARM_MMU_IDX_A_NS 0x8
/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
@ -3035,20 +3078,22 @@ typedef enum ARMMMUIdx {
/*
* A-profile.
*/
ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A,
ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A,
ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A,
ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A,
ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A,
ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A,
ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A,
ARMMMUIdx_E2 = 4 | ARM_MMU_IDX_A,
ARMMMUIdx_E20_2 = 5 | ARM_MMU_IDX_A,
ARMMMUIdx_E20_2_PAN = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_SE10_0 = 7 | ARM_MMU_IDX_A,
ARMMMUIdx_SE10_1 = 8 | ARM_MMU_IDX_A,
ARMMMUIdx_SE10_1_PAN = 9 | ARM_MMU_IDX_A,
ARMMMUIdx_SE3 = 10 | ARM_MMU_IDX_A,
ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
/*
* These are not allocated TLBs and are used only for AT system
@ -3057,6 +3102,9 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
/*
* Not allocated a TLB: used only for second stage of an S12 page
* table walk, or for descriptor loads during first stage of an S1
@ -3064,7 +3112,8 @@ typedef enum ARMMMUIdx {
* then various TLB flush insns which currently are no-ops or flush
* only stage 1 MMU indexes will need to change to flush stage 2.
*/
ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB,
/*
* M-profile.
@ -3095,8 +3144,12 @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
TO_CORE_BIT(SE10_0),
TO_CORE_BIT(SE20_0),
TO_CORE_BIT(SE10_1),
TO_CORE_BIT(SE20_2),
TO_CORE_BIT(SE10_1_PAN),
TO_CORE_BIT(SE20_2_PAN),
TO_CORE_BIT(SE2),
TO_CORE_BIT(SE3),
TO_CORE_BIT(MUser),
@ -3127,7 +3180,7 @@ static inline int arm_debug_target_el(CPUARMState *env)
bool secure = arm_is_secure(env);
bool route_to_el2 = false;
if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
if (arm_is_el2_enabled(env)) {
route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
env->cp15.mdcr_el2 & MDCR_TDE;
}
@ -3918,10 +3971,8 @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
{
/*
* Note that while QEMU will only implement the architected algorithm
* QARMA, and thus APA+GPA, the host cpu for kvm may use implementation
* defined algorithms, and thus API+GPI, and this predicate controls
* migration of the 128-bit keys.
* Return true if any form of pauth is enabled, as this
* predicate controls migration of the 128-bit keys.
*/
return (id->id_aa64isar1 &
(FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
@ -3930,6 +3981,15 @@ static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
}
static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
{
/*
* Return true if pauth is enabled with the architected QARMA algorithm.
* QEMU will always set APA+GPA to the same value.
*/
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
}
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
@ -3977,6 +4037,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}
static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
}
static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
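One property of the mmu-index renumbering above (a hypothetical compile-time check, not in the commit): every Non-secure index is its Secure twin with ARM_MMU_IDX_A_NS ORed in, so the 1 << index TLB masks convert NS to SE with a single shift, which is what the later 'mask >>= ARM_MMU_IDX_A_NS' code in helper.c relies on:

/* Hypothetical sanity checks, not from the commit. */
QEMU_BUILD_BUG_ON(ARMMMUIdx_E10_0 != (ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS));
QEMU_BUILD_BUG_ON(ARMMMUIdxBit_E10_0 !=
                  (ARMMMUIdxBit_SE10_0 << ARM_MMU_IDX_A_NS));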


@ -28,6 +28,8 @@
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "qapi/visitor.h"
#include "hw/qdev-properties.h"
#ifndef CONFIG_USER_ONLY
static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@ -486,6 +488,12 @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
cpu->sve_max_vq = max_vq;
}
/*
* Note that cpu_arm_get/set_sve_vq cannot use the simpler
* object_property_add_bool interface because they make use
* of the contents of "name" to determine which bit on which
* to operate.
*/
static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@ -527,26 +535,17 @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
set_bit(vq - 1, cpu->sve_vq_init);
}
static void cpu_arm_get_sve(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
static bool cpu_arm_get_sve(Object *obj, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
bool value = cpu_isar_feature(aa64_sve, cpu);
visit_type_bool(v, name, &value, errp);
return cpu_isar_feature(aa64_sve, cpu);
}
static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
bool value;
uint64_t t;
if (!visit_type_bool(v, name, &value, errp)) {
return;
}
if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
error_setg(errp, "'sve' feature not supported by KVM on this host");
return;
@ -561,8 +560,7 @@ void aarch64_add_sve_properties(Object *obj)
{
uint32_t vq;
object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
cpu_arm_set_sve, NULL, NULL);
object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
char name[8];
@ -572,6 +570,36 @@ void aarch64_add_sve_properties(Object *obj)
}
}
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
int arch_val = 0, impdef_val = 0;
uint64_t t;
/* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
if (cpu->prop_pauth) {
if (cpu->prop_pauth_impdef) {
impdef_val = 1;
} else {
arch_val = 1;
}
} else if (cpu->prop_pauth_impdef) {
error_setg(errp, "cannot enable pauth-impdef without pauth");
error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
}
t = cpu->isar.id_aa64isar1;
t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
cpu->isar.id_aa64isar1 = t;
}
static Property arm_cpu_pauth_property =
DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static Property arm_cpu_pauth_impdef_property =
DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
* otherwise, a CPU with as many features enabled as our emulation supports.
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
@ -627,10 +655,6 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, APA, 1); /* PAuth, architected only */
t = FIELD_DP64(t, ID_AA64ISAR1, API, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, GPA, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, GPI, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
@ -641,6 +665,7 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
cpu->isar.id_aa64pfr0 = t;
t = cpu->isar.id_aa64pfr1;
@ -721,6 +746,10 @@ static void aarch64_max_initfn(Object *obj)
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
cpu->dcz_blocksize = 7; /* 512 bytes */
#endif
/* Default to PAUTH on, with the architected algorithm. */
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
}
aarch64_add_sve_properties(obj);
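For completeness, a hypothetical QOM equivalent of '-cpu max,pauth-impdef=on' (assumed object_property_set_bool() usage, not code from this commit):

object_property_set_bool(OBJECT(cpu), "pauth-impdef", true, &error_fatal);

The property only exists on the 'max' CPU, and per arm_cpu_pauth_finalize() above, enabling it together with pauth=off is rejected at finalize time.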


@ -972,8 +972,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
if (new_el == -1) {
goto illegal_return;
}
if (new_el > cur_el
|| (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
if (new_el > cur_el || (new_el == 2 && !arm_is_el2_enabled(env))) {
/* Disallow return to an EL which is unimplemented or higher
* than the current one.
*/
@ -985,11 +984,6 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}
if (new_el == 2 && arm_is_secure_below_el3(env)) {
/* Return to the non-existent secure-EL2 */
goto illegal_return;
}
if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}


@ -533,12 +533,20 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
return CP_ACCESS_OK;
}
if (arm_is_secure_below_el3(env)) {
if (env->cp15.scr_el3 & SCR_EEL2) {
return CP_ACCESS_TRAP_EL2;
}
return CP_ACCESS_TRAP_EL3;
}
/* This will be EL1 NS and EL2 NS, which just UNDEF */
return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}
/* Check for traps to "powerdown debug" registers, which are controlled
* by MDCR.TDOSA
*/
@ -546,11 +554,11 @@ static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
(arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
if (el < 2 && mdcr_el2_tdosa) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
@ -566,11 +574,11 @@ static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
(arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
if (el < 2 && mdcr_el2_tdra) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
@ -586,11 +594,11 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
(arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
if (el < 2 && mdcr_el2_tda) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
@ -606,9 +614,9 @@ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
&& !arm_is_secure_below_el3(env)) {
if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
@ -1049,8 +1057,8 @@ static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
{
if (arm_feature(env, ARM_FEATURE_V8)) {
/* Check if CPACR accesses are to be trapped to EL2 */
if (arm_current_el(env) == 1 &&
(env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
(env->cp15.cptr_el[2] & CPTR_TCPAC)) {
return CP_ACCESS_TRAP_EL2;
/* Check if CPACR accesses are to be trapped to EL3 */
} else if (arm_current_el(env) < 3 &&
@ -1348,12 +1356,12 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
* trapping to EL2 or EL3 for other accesses.
*/
int el = arm_current_el(env);
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
return CP_ACCESS_TRAP;
}
if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
&& !arm_is_secure_below_el3(env)) {
if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
@ -1432,7 +1440,8 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
bool enabled, prohibited, filtered;
bool secure = arm_is_secure(env);
int el = arm_current_el(env);
uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
uint8_t hpmn = mdcr_el2 & MDCR_HPMN;
if (!arm_feature(env, ARM_FEATURE_PMU)) {
return false;
@ -1442,13 +1451,13 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
(counter < hpmn || counter == 31)) {
e = env->cp15.c9_pmcr & PMCRE;
} else {
e = env->cp15.mdcr_el2 & MDCR_HPME;
e = mdcr_el2 & MDCR_HPME;
}
enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
if (!secure) {
if (el == 2 && (counter < hpmn || counter == 31)) {
prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
prohibited = mdcr_el2 & MDCR_HPMD;
} else {
prohibited = false;
}
@ -2024,6 +2033,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_pauth, cpu)) {
valid_mask |= SCR_API | SCR_APK;
}
if (cpu_isar_feature(aa64_sel2, cpu)) {
valid_mask |= SCR_EEL2;
}
if (cpu_isar_feature(aa64_mte, cpu)) {
valid_mask |= SCR_ATA;
}
@ -2084,13 +2096,11 @@ static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
CPUState *cs = env_cpu(env);
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
bool el1 = arm_current_el(env) == 1;
uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
uint64_t ret = 0;
bool allow_virt = (arm_current_el(env) == 1 &&
(!arm_is_secure_below_el3(env) ||
(env->cp15.scr_el3 & SCR_EEL2)));
if (allow_virt && (hcr_el2 & HCR_IMO)) {
if (hcr_el2 & HCR_IMO) {
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
ret |= CPSR_I;
}
@ -2100,7 +2110,7 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
if (allow_virt && (hcr_el2 & HCR_FMO)) {
if (hcr_el2 & HCR_FMO) {
if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
ret |= CPSR_F;
}
@ -2522,7 +2532,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
bool isread)
{
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
bool has_el2 = arm_is_el2_enabled(env);
uint64_t hcr = arm_hcr_el2_eff(env);
switch (cur_el) {
@ -2546,8 +2556,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
}
} else {
/* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
if (arm_feature(env, ARM_FEATURE_EL2) &&
timeridx == GTIMER_PHYS && !secure &&
if (has_el2 && timeridx == GTIMER_PHYS &&
!extract32(env->cp15.cnthctl_el2, 1, 1)) {
return CP_ACCESS_TRAP_EL2;
}
@ -2556,8 +2565,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
case 1:
/* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
if (arm_feature(env, ARM_FEATURE_EL2) &&
timeridx == GTIMER_PHYS && !secure &&
if (has_el2 && timeridx == GTIMER_PHYS &&
(hcr & HCR_E2H
? !extract32(env->cp15.cnthctl_el2, 10, 1)
: !extract32(env->cp15.cnthctl_el2, 0, 1))) {
@ -2572,7 +2580,7 @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
bool isread)
{
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
bool has_el2 = arm_is_el2_enabled(env);
uint64_t hcr = arm_hcr_el2_eff(env);
switch (cur_el) {
@ -2593,8 +2601,7 @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
/* fall through */
case 1:
if (arm_feature(env, ARM_FEATURE_EL2) &&
timeridx == GTIMER_PHYS && !secure) {
if (has_el2 && timeridx == GTIMER_PHYS) {
if (hcr & HCR_E2H) {
/* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
@ -2861,6 +2868,9 @@ static int gt_phys_redir_timeridx(CPUARMState *env)
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_SE20_0:
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
return GTIMER_HYP;
default:
return GTIMER_PHYS;
@ -2873,6 +2883,9 @@ static int gt_virt_redir_timeridx(CPUARMState *env)
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_SE20_0:
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
return GTIMER_HYPVIRT;
default:
return GTIMER_VIRT;
@ -3381,13 +3394,16 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (ri->opc2 & 4) {
/* The ATS12NSO* operations must trap to EL3 if executed in
/* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
* Secure EL1 (which can only happen if EL3 is AArch64).
* They are simply UNDEF if executed from NS EL1.
* They function normally from EL2 or EL3.
*/
if (arm_current_el(env) == 1) {
if (arm_is_secure_below_el3(env)) {
if (env->cp15.scr_el3 & SCR_EEL2) {
return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
}
return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
}
return CP_ACCESS_TRAP_UNCATEGORIZED;
@ -3423,7 +3439,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
uint32_t syn, fsr, fsc;
bool take_exc = false;
if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
if (fi.s1ptw && current_el == 1
&& arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
/*
* Synchronous stage 2 fault on an access made as part of the
@ -3438,6 +3454,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
target_el = 3;
} else {
env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
if (arm_is_secure_below_el3(env) && fi.s1ns) {
env->cp15.hpfar_el2 |= HPFAR_NS;
}
target_el = 2;
}
take_exc = true;
@ -3576,14 +3595,14 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
mmu_idx = ARMMMUIdx_SE3;
break;
case 2:
g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */
g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
/* fall through */
case 1:
if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
: ARMMMUIdx_Stage1_E1_PAN);
} else {
mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
}
break;
default:
@ -3597,10 +3616,11 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
mmu_idx = ARMMMUIdx_SE10_0;
break;
case 2:
g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
mmu_idx = ARMMMUIdx_Stage1_E0;
break;
case 1:
mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
break;
default:
g_assert_not_reached();
@ -3646,7 +3666,8 @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
if (arm_current_el(env) == 3 &&
!(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@ -3665,14 +3686,14 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
switch (ri->opc1) {
case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
: ARMMMUIdx_Stage1_E1_PAN);
} else {
mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
}
break;
case 4: /* AT S1E2R, AT S1E2W */
mmu_idx = ARMMMUIdx_E2;
mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
break;
case 6: /* AT S1E3R, AT S1E3W */
mmu_idx = ARMMMUIdx_SE3;
@ -3682,7 +3703,7 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
}
break;
case 2: /* AT S1E0R, AT S1E0W */
mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
break;
case 4: /* AT S12E1R, AT S12E1W */
mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
@ -3987,10 +4008,15 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
(arm_hcr_el2_eff(env) & HCR_E2H)) {
tlb_flush_by_mmuidx(env_cpu(env),
ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0);
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0;
if (arm_is_secure_below_el3(env)) {
mask >>= ARM_MMU_IDX_A_NS;
}
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
}
@ -4006,10 +4032,15 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
* the combined stage 1&2 tlbs (EL10_1 and EL10_0).
*/
if (raw_read(env, ri) != value) {
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0);
uint16_t mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
if (arm_is_secure_below_el3(env)) {
mask >>= ARM_MMU_IDX_A_NS;
}
tlb_flush_by_mmuidx(cs, mask);
raw_write(env, ri, value);
}
}
@ -4250,11 +4281,9 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
if (arm_is_el2_enabled(env) && cur_el == 1) {
return env->cp15.vpidr_el2;
}
return raw_read(env, ri);
@ -4281,9 +4310,8 @@ static uint64_t mpidr_read_val(CPUARMState *env)
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
if (arm_is_el2_enabled(env) && cur_el == 1) {
return env->cp15.vmpidr_el2;
}
return mpidr_read_val(env);
@ -4441,21 +4469,24 @@ static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
static int vae1_tlbmask(CPUARMState *env)
{
/* Since we exclude secure first, we may read HCR_EL2 directly. */
if (arm_is_secure_below_el3(env)) {
return ARMMMUIdxBit_SE10_1 |
ARMMMUIdxBit_SE10_1_PAN |
ARMMMUIdxBit_SE10_0;
} else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
== (HCR_E2H | HCR_TGE)) {
return ARMMMUIdxBit_E20_2 |
uint64_t hcr = arm_hcr_el2_eff(env);
uint16_t mask;
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0;
} else {
return ARMMMUIdxBit_E10_1 |
mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
}
if (arm_is_secure_below_el3(env)) {
mask >>= ARM_MMU_IDX_A_NS;
}
return mask;
}
/* Return 56 if TBI is enabled, 64 otherwise. */
@ -4471,17 +4502,20 @@ static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
{
uint64_t hcr = arm_hcr_el2_eff(env);
ARMMMUIdx mmu_idx;
/* Only the regime of the mmu_idx below is significant. */
if (arm_is_secure_below_el3(env)) {
mmu_idx = ARMMMUIdx_SE10_0;
} else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
== (HCR_E2H | HCR_TGE)) {
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mmu_idx = ARMMMUIdx_E20_0;
} else {
mmu_idx = ARMMMUIdx_E10_0;
}
if (arm_is_secure_below_el3(env)) {
mmu_idx &= ~ARM_MMU_IDX_A_NS;
}
return tlbbits_for_regime(env, mmu_idx, addr);
}
@ -4527,11 +4561,17 @@ static int alle1_tlbmask(CPUARMState *env)
static int e2_tlbmask(CPUARMState *env)
{
/* TODO: ARMv8.4-SecEL2 */
return ARMMMUIdxBit_E20_0 |
ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E2;
if (arm_is_secure_below_el3(env)) {
return ARMMMUIdxBit_SE20_0 |
ARMMMUIdxBit_SE20_2 |
ARMMMUIdxBit_SE20_2_PAN |
ARMMMUIdxBit_SE2;
} else {
return ARMMMUIdxBit_E20_0 |
ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E2;
}
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@ -4651,10 +4691,12 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
bool secure = arm_is_secure_below_el3(env);
int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
ARMMMUIdxBit_E2, bits);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@ -5350,7 +5392,7 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
uint64_t ret = env->cp15.hcr_el2;
if (arm_is_secure_below_el3(env)) {
if (!arm_is_el2_enabled(env)) {
/*
* "This register has no effect if EL2 is not enabled in the
* current Security state". This is ARMv8.4-SecEL2 speak for
@ -5696,16 +5738,40 @@ static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
REGINFO_SENTINEL
};
static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
return CP_ACCESS_OK;
}
return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
{ .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
.access = PL2_RW, .accessfn = sel2_access,
.fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
{ .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
.access = PL2_RW, .accessfn = sel2_access,
.fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
/* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
* At Secure EL1 it traps to EL3.
* At Secure EL1 it traps to EL3 or EL2.
*/
if (arm_current_el(env) == 3) {
return CP_ACCESS_OK;
}
if (arm_is_secure_below_el3(env)) {
if (env->cp15.scr_el3 & SCR_EEL2) {
return CP_ACCESS_TRAP_EL2;
}
return CP_ACCESS_TRAP_EL3;
}
/* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
@ -6147,7 +6213,7 @@ int sve_exception_el(CPUARMState *env, int el)
/* CPTR_EL2. Since TZ and TFP are positive,
* they will be zero when EL2 is not present.
*/
if (el <= 2 && !arm_is_secure_below_el3(env)) {
if (el <= 2 && arm_is_el2_enabled(env)) {
if (env->cp15.cptr_el[2] & CPTR_TZ) {
return 2;
}
@ -7708,6 +7774,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V8)) {
define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
}
if (cpu_isar_feature(aa64_sel2, cpu)) {
define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
}
/* RVBAR_EL2 is only implemented if EL2 is the highest EL */
if (!arm_feature(env, ARM_FEATURE_EL3)) {
ARMCPRegInfo rvbar = {
@ -8722,8 +8791,7 @@ static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
}
return 0;
case ARM_CPU_MODE_HYP:
return !arm_feature(env, ARM_FEATURE_EL2)
|| arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
case ARM_CPU_MODE_MON:
return arm_current_el(env) < 3;
default:
@ -9005,13 +9073,13 @@ static const int8_t target_el_table[2][2][2][2][2][4] = {
{{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
{/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
{{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
{/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
{{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
{/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
{/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
{{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
{/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
{{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
{/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
{{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
{/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
{{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
{/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
};
/*
@ -9957,7 +10025,8 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
/* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
? 2 : 1;
}
return env->cp15.sctlr_el[el];
}
@ -9974,6 +10043,8 @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline bool regime_translation_disabled(CPUARMState *env,
ARMMMUIdx mmu_idx)
{
uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_M)) {
switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
@ -9992,19 +10063,21 @@ static inline bool regime_translation_disabled(CPUARMState *env,
}
}
if (mmu_idx == ARMMMUIdx_Stage2) {
hcr_el2 = arm_hcr_el2_eff(env);
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
/* HCR.DC means HCR.VM behaves as 1 */
return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
}
if (env->cp15.hcr_el2 & HCR_TGE) {
if (hcr_el2 & HCR_TGE) {
/* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
return true;
}
}
if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
return true;
}
@ -10025,6 +10098,9 @@ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
if (mmu_idx == ARMMMUIdx_Stage2) {
return env->cp15.vttbr_el2;
}
if (mmu_idx == ARMMMUIdx_Stage2_S) {
return env->cp15.vsttbr_el2;
}
if (ttbrn == 0) {
return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
} else {
@ -10040,6 +10116,12 @@ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
case ARMMMUIdx_SE10_0:
return ARMMMUIdx_Stage1_SE0;
case ARMMMUIdx_SE10_1:
return ARMMMUIdx_Stage1_SE1;
case ARMMMUIdx_SE10_1_PAN:
return ARMMMUIdx_Stage1_SE1_PAN;
case ARMMMUIdx_E10_0:
return ARMMMUIdx_Stage1_E0;
case ARMMMUIdx_E10_1:
@ -10082,7 +10164,9 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_SE20_0:
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_SE0:
case ARMMMUIdx_MUser:
case ARMMMUIdx_MSUser:
case ARMMMUIdx_MUserNegPri:
@ -10248,6 +10332,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
int wxn = 0;
assert(mmu_idx != ARMMMUIdx_Stage2);
assert(mmu_idx != ARMMMUIdx_Stage2_S);
user_rw = simple_ap_to_rw_prot_is_user(ap, true);
if (is_user) {
@ -10333,7 +10418,7 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr addr, MemTxAttrs txattrs,
hwaddr addr, bool *is_secure,
ARMMMUFaultInfo *fi)
{
if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
@ -10342,10 +10427,12 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr s2pa;
int s2prot;
int ret;
ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
: ARMMMUIdx_Stage2;
ARMCacheAttrs cacheattrs = {};
MemTxAttrs txattrs = {};
ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
false,
ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
&s2pa, &txattrs, &s2prot, &s2size, fi,
&cacheattrs);
if (ret) {
@ -10353,9 +10440,11 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
fi->s1ns = !*is_secure;
return ~0;
}
if ((env->cp15.hcr_el2 & HCR_PTW) && (cacheattrs.attrs & 0xf0) == 0) {
if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
(cacheattrs.attrs & 0xf0) == 0) {
/*
* PTW set and S1 walk touched S2 Device memory:
* generate Permission fault.
@ -10364,8 +10453,21 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
fi->s1ns = !*is_secure;
return ~0;
}
if (arm_is_secure_below_el3(env)) {
/* Check if page table walk is to secure or non-secure PA space. */
if (*is_secure) {
*is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
} else {
*is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
}
} else {
assert(!*is_secure);
}
addr = s2pa;
}
return addr;
@ -10382,9 +10484,9 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
AddressSpace *as;
uint32_t data;
addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
attrs.secure = is_secure;
as = arm_addressspace(cs, attrs);
addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
if (fi->s1ptw) {
return 0;
}
@ -10411,9 +10513,9 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
AddressSpace *as;
uint64_t data;
addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
attrs.secure = is_secure;
as = arm_addressspace(cs, attrs);
addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
if (fi->s1ptw) {
return 0;
}
@ -10788,7 +10890,7 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
uint8_t hihint = 0, lohint = 0;
if (hiattr != 0) { /* normal memory */
if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
hiattr = loattr = 1; /* non-cacheable */
} else {
if (hiattr != 1) { /* Write-through or write-back */
@ -10808,7 +10910,7 @@ static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
if (regime_has_2_ranges(mmu_idx)) {
return extract64(tcr, 37, 2);
} else if (mmu_idx == ARMMMUIdx_Stage2) {
} else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
return 0; /* VTCR_EL2 */
} else {
/* Replicate the single TBI bit so we always have 2 bits. */
@ -10820,7 +10922,7 @@ static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
if (regime_has_2_ranges(mmu_idx)) {
return extract64(tcr, 51, 2);
} else if (mmu_idx == ARMMMUIdx_Stage2) {
} else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
return 0; /* VTCR_EL2 */
} else {
/* Replicate the single TBID bit so we always have 2 bits. */
@ -10850,7 +10952,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
tsz = extract32(tcr, 0, 6);
using64k = extract32(tcr, 14, 1);
using16k = extract32(tcr, 15, 1);
if (mmu_idx == ARMMMUIdx_Stage2) {
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
/* VTCR_EL2 */
hpd = false;
} else {
@ -10915,6 +11017,8 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
int select, tsz;
bool epd, hpd;
assert(mmu_idx != ARMMMUIdx_Stage2_S);
if (mmu_idx == ARMMMUIdx_Stage2) {
/* VTCR */
bool sext = extract32(tcr, 4, 1);
@ -11080,7 +11184,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
goto do_fault;
}
if (mmu_idx != ARMMMUIdx_Stage2) {
if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
/* The starting level depends on the virtual address size (which can
* be up to 48 bits) and the translation granule size. It indicates
* the number of strides (stride bits at a time) needed to
@ -11188,7 +11292,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
attrs = extract64(descriptor, 2, 10)
| (extract64(descriptor, 52, 12) << 10);
if (mmu_idx == ARMMMUIdx_Stage2) {
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
/* Stage 2 table descriptors do not include any attribute fields */
break;
}
@ -11218,8 +11322,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
ap = extract32(attrs, 4, 2);
if (mmu_idx == ARMMMUIdx_Stage2) {
ns = true;
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
ns = mmu_idx == ARMMMUIdx_Stage2;
xn = extract32(attrs, 11, 2);
*prot = get_S2prot(env, ap, xn, s1_is_el0);
} else {
@ -11246,7 +11350,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
arm_tlb_bti_gp(txattrs) = true;
}
if (mmu_idx == ARMMMUIdx_Stage2) {
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
} else {
/* Index into MAIR registers for cache attributes */
@ -11265,7 +11369,9 @@ do_fault:
fi->type = fault_type;
fi->level = level;
/* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
mmu_idx == ARMMMUIdx_Stage2_S);
fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
return true;
}
@ -12084,21 +12190,22 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
target_ulong *page_size,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_E10_0 ||
mmu_idx == ARMMMUIdx_E10_1 ||
mmu_idx == ARMMMUIdx_E10_1_PAN) {
ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
if (mmu_idx != s1_mmu_idx) {
/* Call ourselves recursively to do the stage 1 and then stage 2
* translations.
* translations if mmu_idx is a two-stage regime.
*/
if (arm_feature(env, ARM_FEATURE_EL2)) {
hwaddr ipa;
int s2_prot;
int ret;
ARMCacheAttrs cacheattrs2 = {};
ARMMMUIdx s2_mmu_idx;
bool is_el0;
ret = get_phys_addr(env, address, access_type,
stage_1_mmu_idx(mmu_idx), &ipa, attrs,
prot, page_size, fi, cacheattrs);
ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
attrs, prot, page_size, fi, cacheattrs);
/* If S1 fails or S2 is disabled, return early. */
if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
@ -12106,9 +12213,11 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
return ret;
}
s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
/* S1 is done. Now do S2 translation. */
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
mmu_idx == ARMMMUIdx_E10_0,
ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
phys_ptr, attrs, &s2_prot,
page_size, fi, &cacheattrs2);
fi->s2addr = ipa;
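
Editor's note: for orientation, the recursion above first resolves VA to IPA with the stage 1 index, then feeds the IPA through ARMMMUIdx_Stage2 or ARMMMUIdx_Stage2_S. A toy model of that composition, with faults reduced to a boolean (QEMU's real code also threads attributes, protection bits and fault info through both calls; the demo translators are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    typedef bool (*xlate_fn)(uint64_t in, uint64_t *out);

    /* Stage 1 maps VA->IPA, stage 2 maps IPA->PA; either may fault. */
    static bool two_stage_lookup(xlate_fn s1, xlate_fn s2, bool s2_enabled,
                                 uint64_t va, uint64_t *pa)
    {
        uint64_t ipa;

        if (!s1(va, &ipa)) {
            return false;
        }
        if (!s2_enabled) {
            *pa = ipa;           /* S2 disabled: the IPA is the PA */
            return true;
        }
        return s2(ipa, pa);
    }

    /* Hypothetical demo translators. */
    static bool s1_demo(uint64_t in, uint64_t *out) { *out = in + 0x1000; return true; }
    static bool s2_demo(uint64_t in, uint64_t *out) { *out = in | (1ULL << 40); return true; }

    int main(void)
    {
        uint64_t pa;
        return two_stage_lookup(s1_demo, s2_demo, true, 0x8000, &pa) ? 0 : 1;
    }
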
@ -12121,7 +12230,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
}
/* Combine the S1 and S2 cache attributes. */
if (env->cp15.hcr_el2 & HCR_DC) {
if (arm_hcr_el2_eff(env) & HCR_DC) {
/*
* HCR.DC forces the first stage attributes to
* Normal Non-Shareable,
@ -12135,6 +12244,18 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
cacheattrs->shareability = 0;
}
*cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
/* Check if IPA translates to secure or non-secure PA space. */
if (arm_is_secure_below_el3(env)) {
if (attrs->secure) {
attrs->secure =
!(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
} else {
attrs->secure =
!((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
|| (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
}
}
return 0;
} else {
/*
@ -12203,7 +12324,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
* MMU disabled. S1 addresses within aa64 translation regimes are
* still checked for bounds -- see AArch64.TranslateAddressS1Off.
*/
if (mmu_idx != ARMMMUIdx_Stage2) {
if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
int r_el = regime_el(env, mmu_idx);
if (arm_el_is_aa64(env, r_el)) {
int pamax = arm_pamax(env_archcpu(env));
@ -12649,7 +12770,7 @@ int fp_exception_el(CPUARMState *env, int cur_el)
/* CPTR_EL2 : present in v7VE or v8 */
if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
&& !arm_is_secure_below_el3(env)) {
&& arm_is_el2_enabled(env)) {
/* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
return 2;
}
@ -12674,6 +12795,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_SE20_0:
return 0;
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
@ -12683,6 +12805,9 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
case ARMMMUIdx_E2:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_SE2:
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
return 2;
case ARMMMUIdx_SE3:
return 3;
@ -12700,6 +12825,9 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
ARMMMUIdx idx;
uint64_t hcr;
if (arm_feature(env, ARM_FEATURE_M)) {
return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
}
@ -12707,40 +12835,43 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
/* See ARM pseudo-function ELIsInHost. */
switch (el) {
case 0:
if (arm_is_secure_below_el3(env)) {
return ARMMMUIdx_SE10_0;
hcr = arm_hcr_el2_eff(env);
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
idx = ARMMMUIdx_E20_0;
} else {
idx = ARMMMUIdx_E10_0;
}
if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
&& arm_el_is_aa64(env, 2)) {
return ARMMMUIdx_E20_0;
}
return ARMMMUIdx_E10_0;
break;
case 1:
if (arm_is_secure_below_el3(env)) {
if (env->pstate & PSTATE_PAN) {
return ARMMMUIdx_SE10_1_PAN;
}
return ARMMMUIdx_SE10_1;
}
if (env->pstate & PSTATE_PAN) {
return ARMMMUIdx_E10_1_PAN;
idx = ARMMMUIdx_E10_1_PAN;
} else {
idx = ARMMMUIdx_E10_1;
}
return ARMMMUIdx_E10_1;
break;
case 2:
/* TODO: ARMv8.4-SecEL2 */
/* Note that TGE does not apply at EL2. */
if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
if (arm_hcr_el2_eff(env) & HCR_E2H) {
if (env->pstate & PSTATE_PAN) {
return ARMMMUIdx_E20_2_PAN;
idx = ARMMMUIdx_E20_2_PAN;
} else {
idx = ARMMMUIdx_E20_2;
}
return ARMMMUIdx_E20_2;
} else {
idx = ARMMMUIdx_E2;
}
return ARMMMUIdx_E2;
break;
case 3:
return ARMMMUIdx_SE3;
default:
g_assert_not_reached();
}
if (arm_is_secure_below_el3(env)) {
idx &= ~ARM_MMU_IDX_A_NS;
}
return idx;
}
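
Editor's note: the rewritten arm_mmu_idx_el() computes the non-secure index unconditionally and derives the secure one by clearing a single flag bit at the end, which is what lets the SE20 family fall out without extra cases. A sketch of that encoding trick, with illustrative values (the real index values live in cpu.h):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative encoding only: each non-secure index is its secure
     * twin plus one flag bit, so one mask operation switches regimes. */
    enum {
        MMU_IDX_NS_BIT = 0x8,                  /* stands in for ARM_MMU_IDX_A_NS */
        IDX_SE10_1     = 0x1,
        IDX_E10_1      = IDX_SE10_1 | MMU_IDX_NS_BIT,
    };

    int main(void)
    {
        int idx = IDX_E10_1;        /* computed as if non-secure */
        int secure = 1;             /* arm_is_secure_below_el3() said yes */

        if (secure) {
            idx &= ~MMU_IDX_NS_BIT; /* drop the NS flag: secure regime */
        }
        assert(idx == IDX_SE10_1);
        printf("idx=%#x\n", idx);
        return 0;
    }
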
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
@ -12905,7 +13036,8 @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
break;
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
/* TODO: ARMv8.4-SecEL2 */
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
/*
* Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.

View File

@ -593,6 +593,7 @@ typedef enum ARMFaultType {
* @s2addr: Address that caused a fault at stage 2
* @stage2: True if we faulted at stage 2
* @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
* @s1ns: True if we faulted on a non-secure IPA while in secure state
* @ea: True if we should set the EA (external abort type) bit in syndrome
*/
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
@ -603,6 +604,7 @@ struct ARMMMUFaultInfo {
int domain;
bool stage2;
bool s1ptw;
bool s1ns;
bool ea;
};
@ -851,6 +853,9 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_Stage1_SE0:
case ARMMMUIdx_Stage1_SE1:
case ARMMMUIdx_Stage1_SE1_PAN:
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
@ -860,6 +865,9 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_SE10_1:
case ARMMMUIdx_SE10_1_PAN:
case ARMMMUIdx_SE20_0:
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
return true;
default:
return false;
@ -890,6 +898,14 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_SE10_1:
case ARMMMUIdx_SE10_1_PAN:
case ARMMMUIdx_SE20_0:
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
case ARMMMUIdx_Stage1_SE0:
case ARMMMUIdx_Stage1_SE1:
case ARMMMUIdx_Stage1_SE1_PAN:
case ARMMMUIdx_SE2:
case ARMMMUIdx_Stage2_S:
case ARMMMUIdx_MSPrivNegPri:
case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
@ -904,9 +920,11 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_Stage1_SE1_PAN:
case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_SE10_1_PAN:
case ARMMMUIdx_SE20_2_PAN:
return true;
default:
return false;
@ -917,21 +935,29 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
case ARMMMUIdx_SE20_0:
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_Stage2:
case ARMMMUIdx_Stage2_S:
case ARMMMUIdx_SE2:
case ARMMMUIdx_E2:
return 2;
case ARMMMUIdx_SE3:
return 3;
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_Stage1_SE0:
return arm_el_is_aa64(env, 3) ? 1 : 3;
case ARMMMUIdx_SE10_1:
case ARMMMUIdx_SE10_1_PAN:
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_Stage1_SE1:
case ARMMMUIdx_Stage1_SE1_PAN:
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
@ -955,6 +981,13 @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
if (mmu_idx == ARMMMUIdx_Stage2) {
return &env->cp15.vtcr_el2;
}
if (mmu_idx == ARMMMUIdx_Stage2_S) {
/*
* Note: Secure stage 2 nominally shares fields with VTCR_EL2, but
* those are not currently used by QEMU, so just return VSTCR_EL2.
*/
return &env->cp15.vstcr_el2;
}
return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
@ -1157,6 +1190,9 @@ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_Stage1_SE0:
case ARMMMUIdx_Stage1_SE1:
case ARMMMUIdx_Stage1_SE1_PAN:
return true;
default:
return false;
@ -1312,6 +1348,15 @@ void arm_log_exception(int idx);
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
/*
* SVE predicates are 1/8 the size of SVE vectors, and cannot use
* the same simd_desc() encoding due to restrictions on size.
* Use these instead.
*/
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
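
Editor's note: since PREDDESC is the pivot for the SVE fixes in this series, here is a standalone sketch of how the three fields pack and unpack. It hand-rolls what FIELD_DP32()/FIELD_EX32() generate for the layout above (OPRSZ in bits [5:0], ESZ in [7:6], DATA in [31:8]); a 2048-bit SVE vector yields a 32-byte predicate, so OPRSZ always fits in 6 bits:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t preddesc_pack(uint32_t oprsz, uint32_t esz, uint32_t data)
    {
        return (oprsz & 0x3f) | ((esz & 0x3) << 6) | (data << 8);
    }

    static uint32_t preddesc_oprsz(uint32_t d) { return d & 0x3f; }
    static uint32_t preddesc_esz(uint32_t d)   { return (d >> 6) & 0x3; }
    static uint32_t preddesc_data(uint32_t d)  { return d >> 8; }

    int main(void)
    {
        /* Largest case: 32-byte predicate, 64-bit elements, DATA flag set. */
        uint32_t desc = preddesc_pack(32, 2, 1);

        assert(preddesc_oprsz(desc) == 32);
        assert(preddesc_esz(desc) == 2);
        assert(preddesc_data(desc) == 1);
        return 0;
    }
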
/*
* The SVE simd_data field, for memory ops, contains either
* rd (5 bits) or a shift count (2 bits).

View File

@ -1347,7 +1347,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
bool exc_secure = false;
bool return_to_secure;
bool ftype;
bool restore_s16_s31;
bool restore_s16_s31 = false;
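
Editor's note: the one-line fix above initializes restore_s16_s31 at declaration because GCC 10 cannot always prove that the assignment and the later use are guarded by the same condition. A minimal sketch of that false-positive shape (hypothetical function, not QEMU code):

    #include <stdbool.h>

    static int opaque_cond(void) { return 0; }   /* stand-in the compiler can't see through */

    int exception_return_like(void)
    {
        bool restore = false;   /* without "= false": -Wmaybe-uninitialized */
        int cond = opaque_cond();

        if (cond) {
            restore = true;
        }
        /* ... unrelated work ... */
        if (cond && restore) {  /* same guard, but flow analysis loses track */
            return 1;
        }
        return 0;
    }
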
/*
* If we're not in Handler mode then jumps to magic exception-exit

View File

@ -95,6 +95,7 @@ static const char *cpu_model_advertised_features[] = {
"sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
"sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
"kvm-no-adjvtime", "kvm-steal-time",
"pauth", "pauth-impdef",
NULL
};

View File

@ -652,10 +652,10 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
target_el = exception_target_el(env);
break;
case CP_ACCESS_TRAP_EL2:
/* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
/* Requesting a trap to EL2 when we're in EL3 is
* a bug in the access function.
*/
assert(!arm_is_secure(env) && arm_current_el(env) != 3);
assert(arm_current_el(env) != 3);
target_el = 2;
break;
case CP_ACCESS_TRAP_EL3:

View File

@ -24,6 +24,7 @@
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/xxhash.h"
static uint64_t pac_cell_shuffle(uint64_t i)
@ -207,8 +208,8 @@ static uint64_t tweak_inv_shuffle(uint64_t i)
return o;
}
static uint64_t pauth_computepac(uint64_t data, uint64_t modifier,
ARMPACKey key)
static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
ARMPACKey key)
{
static const uint64_t RC[5] = {
0x0000000000000000ull,
@ -272,6 +273,22 @@ static uint64_t pauth_computepac(uint64_t data, uint64_t modifier,
return workingval;
}
static uint64_t pauth_computepac_impdef(uint64_t data, uint64_t modifier,
ARMPACKey key)
{
return qemu_xxhash64_4(data, modifier, key.lo, key.hi);
}
static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
uint64_t modifier, ARMPACKey key)
{
if (cpu_isar_feature(aa64_pauth_arch, env_archcpu(env))) {
return pauth_computepac_architected(data, modifier, key);
} else {
return pauth_computepac_impdef(data, modifier, key);
}
}
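
Editor's note: pauth_computepac() now dispatches on the CPU's ID register bits between the QARMA-like architected algorithm and the xxhash-based IMPDEF one. A sketch of the IMPDEF idea with a stand-in mixer (QEMU really calls qemu_xxhash64_4(data, modifier, key.lo, key.hi); mix64() below is the well-known splitmix64-style finalizer, used here only for illustration):

    #include <stdint.h>

    /* Any stable keyed mix works for an IMPDEF PAC, which is why it is
     * so much cheaper than the architected multi-round network. */
    static uint64_t mix64(uint64_t x)
    {
        x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
        x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
        x ^= x >> 33;
        return x;
    }

    static uint64_t pac_impdef(uint64_t data, uint64_t modifier,
                               uint64_t key_lo, uint64_t key_hi)
    {
        return mix64(data ^ mix64(modifier ^ mix64(key_lo ^ mix64(key_hi))));
    }

    int main(void)
    {
        return pac_impdef(0x1234, 0, 1, 2) != 0 ? 0 : 1;
    }
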
static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data)
{
@ -292,7 +309,7 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
bot_bit = 64 - param.tsz;
ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext);
pac = pauth_computepac(ext_ptr, modifier, *key);
pac = pauth_computepac(env, ext_ptr, modifier, *key);
/*
* Check if the ptr has good extension bits and corrupt the
@ -341,7 +358,7 @@ static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
uint64_t pac, orig_ptr, test;
orig_ptr = pauth_original_ptr(ptr, param);
pac = pauth_computepac(orig_ptr, modifier, *key);
pac = pauth_computepac(env, orig_ptr, modifier, *key);
bot_bit = 64 - param.tsz;
top_bit = 64 - 8 * param.tbi;
@ -442,7 +459,7 @@ uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y)
uint64_t pac;
pauth_check_trap(env, arm_current_el(env), GETPC());
pac = pauth_computepac(x, y, env->keys.apga);
pac = pauth_computepac(env, x, y, env->keys.apga);
return pac & 0xffffffff00000000ull;
}

View File

@ -889,8 +889,9 @@ static intptr_t last_active_element(uint64_t *g, intptr_t words, intptr_t esz)
return (intptr_t)-1 << esz;
}
uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t words)
uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t pred_desc)
{
intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
uint32_t flags = PREDTEST_INIT;
uint64_t *d = vd, *g = vg;
intptr_t i = 0;
@ -914,8 +915,8 @@ uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t words)
uint32_t HELPER(sve_pnext)(void *vd, void *vg, uint32_t pred_desc)
{
intptr_t words = extract32(pred_desc, 0, SIMD_OPRSZ_BITS);
intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
uint32_t flags = PREDTEST_INIT;
uint64_t *d = vd, *g = vg, esz_mask;
intptr_t i, next;
@ -1867,9 +1868,9 @@ static uint64_t compress_bits(uint64_t x, int n)
void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
{
intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
uint64_t *d = vd;
intptr_t i;
@ -1928,9 +1929,9 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
void HELPER(sve_uzp_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
{
intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
int odd = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1) << esz;
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
int odd = FIELD_EX32(pred_desc, PREDDESC, DATA) << esz;
uint64_t *d = vd, *n = vn, *m = vm;
uint64_t l, h;
intptr_t i;
@ -1985,9 +1986,9 @@ void HELPER(sve_uzp_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
void HELPER(sve_trn_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
{
intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
uintptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
bool odd = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
int odd = FIELD_EX32(pred_desc, PREDDESC, DATA);
uint64_t *d = vd, *n = vn, *m = vm;
uint64_t mask;
int shr, shl;
@ -2035,8 +2036,8 @@ static uint8_t reverse_bits_8(uint8_t x, int n)
void HELPER(sve_rev_p)(void *vd, void *vn, uint32_t pred_desc)
{
intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
intptr_t i, oprsz_2 = oprsz / 2;
if (oprsz <= 8) {
@ -2065,8 +2066,8 @@ void HELPER(sve_rev_p)(void *vd, void *vn, uint32_t pred_desc)
void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc)
{
intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2;
intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1);
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
uint64_t *d = vd;
intptr_t i;

View File

@ -63,6 +63,9 @@ static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
if (fi->stage2) {
target_el = 2;
env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
if (arm_is_secure_below_el3(env) && fi->s1ns) {
env->cp15.hpfar_el2 |= HPFAR_NS;
}
}
same_el = (arm_current_el(env) == target_el);
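
Editor's note: the new s1ns flag surfaces here. A stage 2 fault taken from Secure state must tell the hypervisor which IPA space it hit, via HPFAR_EL2.NS. A compact restatement of the register assembly above, assuming HPFAR_NS is bit 63 per the Arm ARM:

    #include <stdbool.h>
    #include <stdint.h>

    #define HPFAR_NS (1ULL << 63)   /* assumed bit position */

    /* FIPA lands in HPFAR_EL2[47:4]; NS is set when a Secure-state
     * stage 2 fault hit the non-secure IPA space. */
    static uint64_t make_hpfar(uint64_t s2addr, bool secure_state, bool s1ns)
    {
        uint64_t hpfar = ((s2addr >> 12) & ((1ULL << 47) - 1)) << 4;

        if (secure_state && s1ns) {
            hpfar |= HPFAR_NS;
        }
        return hpfar;
    }

    int main(void)
    {
        return (make_hpfar(0xdead000ULL, true, true) & HPFAR_NS) ? 0 : 1;
    }
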

View File

@ -118,6 +118,10 @@ static int get_a64_user_mem_index(DisasContext *s)
case ARMMMUIdx_SE10_1_PAN:
useridx = ARMMMUIdx_SE10_0;
break;
case ARMMMUIdx_SE20_2:
case ARMMMUIdx_SE20_2_PAN:
useridx = ARMMMUIdx_SE20_0;
break;
default:
g_assert_not_reached();
}

View File

@ -1494,10 +1494,10 @@ static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
TCGv_ptr t_pd = tcg_temp_new_ptr();
TCGv_ptr t_pg = tcg_temp_new_ptr();
TCGv_i32 t;
unsigned desc;
unsigned desc = 0;
desc = DIV_ROUND_UP(pred_full_reg_size(s), 8);
desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz);
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
@ -2110,19 +2110,15 @@ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
unsigned vsz = pred_full_reg_size(s);
/* Predicate sizes may be smaller and cannot use simd_desc.
We cannot round up, as we do elsewhere, because we need
the exact size for ZIP2 and REV. We retain the style for
the other helpers for consistency. */
TCGv_ptr t_d = tcg_temp_new_ptr();
TCGv_ptr t_n = tcg_temp_new_ptr();
TCGv_ptr t_m = tcg_temp_new_ptr();
TCGv_i32 t_desc;
int desc;
uint32_t desc = 0;
desc = vsz - 2;
desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz);
desc = deposit32(desc, SIMD_DATA_SHIFT + 2, 2, high_odd);
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
@ -2149,19 +2145,14 @@ static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
TCGv_ptr t_d = tcg_temp_new_ptr();
TCGv_ptr t_n = tcg_temp_new_ptr();
TCGv_i32 t_desc;
int desc;
uint32_t desc = 0;
tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
/* Predicate sizes may be smaller and cannot use simd_desc.
We cannot round up, as we do elsewhere, because we need
the exact size for ZIP2 and REV. We retain the style for
the other helpers for consistency. */
desc = vsz - 2;
desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz);
desc = deposit32(desc, SIMD_DATA_SHIFT + 2, 2, high_odd);
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
t_desc = tcg_const_i32(desc);
fn(t_d, t_n, t_desc);
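
Editor's note: both sides changed in step with this encoding. translate-sve.c now stores the exact predicate size in bytes in PREDDESC.OPRSZ (rather than vsz - 2 in the raw low bits), and the sve_pfirst()/sve_pnext() helpers recover the 64-bit word count with DIV_ROUND_UP. A round-trip check under that assumption:

    #include <assert.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Assume a 512-bit vector: 64-byte vectors, 8-byte predicates. */
        uint32_t pred_bytes = 8;

        /* translate-sve.c side: FIELD_DP32(desc, PREDDESC, OPRSZ, ...) */
        uint32_t desc = pred_bytes & 0x3f;

        /* sve_helper.c side, as in sve_pfirst()/sve_pnext() */
        uint32_t words = DIV_ROUND_UP(desc & 0x3f, 8);

        assert(words == 1);
        return 0;
    }
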

View File

@ -1094,6 +1094,22 @@ static void unallocated_encoding(DisasContext *s)
default_exception_el(s));
}
static void gen_exception_el(DisasContext *s, int excp, uint32_t syn,
TCGv_i32 tcg_el)
{
TCGv_i32 tcg_excp;
TCGv_i32 tcg_syn;
gen_set_condexec(s);
gen_set_pc_im(s, s->pc_curr);
tcg_excp = tcg_const_i32(excp);
tcg_syn = tcg_const_i32(syn);
gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn, tcg_el);
tcg_temp_free_i32(tcg_syn);
tcg_temp_free_i32(tcg_excp);
s->base.is_jmp = DISAS_NORETURN;
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
@ -2816,10 +2832,24 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
}
if (s->current_el == 1) {
/* If we're in Secure EL1 (which implies that EL3 is AArch64)
* then accesses to Mon registers trap to EL3
* then accesses to Mon registers trap to Secure EL2, if it exists,
* otherwise EL3.
*/
exc_target = 3;
goto undef;
TCGv_i32 tcg_el;
if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
dc_isar_feature(aa64_sel2, s)) {
/* Target EL is EL<3 minus SCR_EL3.EEL2> */
tcg_el = load_cpu_field(cp15.scr_el3);
tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
tcg_gen_addi_i32(tcg_el, tcg_el, 3);
} else {
tcg_el = tcg_const_i32(3);
}
gen_exception_el(s, EXCP_UDEF, syn_uncategorized(), tcg_el);
tcg_temp_free_i32(tcg_el);
return false;
}
break;
case ARM_CPU_MODE_HYP:
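
Editor's note: the sextract dance above computes "EL3 minus SCR_EL3.EEL2" without a branch: sign-extracting the one-bit field yields 0 or -1, and adding 3 gives 3 or 2. The same arithmetic in plain C, assuming EEL2 is SCR_EL3 bit 18:

    #include <assert.h>
    #include <stdint.h>

    #define SCR_EEL2 (1u << 18)   /* assumed bit position, per the Arm ARM */

    static int mon_trap_target_el(uint32_t scr_el3)
    {
        int eel2_sext = -(int)((scr_el3 >> 18) & 1);   /* 0 or -1 */
        return 3 + eel2_sext;                          /* EL3 or EL2 */
    }

    int main(void)
    {
        assert(mon_trap_target_el(0) == 3);
        assert(mon_trap_target_el(SCR_EEL2) == 2);
        return 0;
    }
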

View File

@ -427,6 +427,18 @@ static void sve_tests_sve_off_kvm(const void *data)
qtest_quit(qts);
}
static void pauth_tests_default(QTestState *qts, const char *cpu_type)
{
assert_has_feature_enabled(qts, cpu_type, "pauth");
assert_has_feature_disabled(qts, cpu_type, "pauth-impdef");
assert_set_feature(qts, cpu_type, "pauth", false);
assert_set_feature(qts, cpu_type, "pauth", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", true);
assert_set_feature(qts, cpu_type, "pauth-impdef", false);
assert_error(qts, cpu_type, "cannot enable pauth-impdef without pauth",
"{ 'pauth': false, 'pauth-impdef': true }");
}
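
Editor's note: the last assertion pins down the property dependency: pauth-impdef only selects the algorithm, so enabling it without pauth must fail with the quoted error. A hypothetical validator capturing that rule (not QEMU's actual property code; only the error string is taken from the test above):

    #include <stdbool.h>
    #include <stddef.h>

    static const char *validate_pauth_props(bool pauth, bool pauth_impdef)
    {
        if (!pauth && pauth_impdef) {
            return "cannot enable pauth-impdef without pauth";
        }
        return NULL;   /* combination accepted */
    }

    int main(void)
    {
        return validate_pauth_props(false, true) != NULL ? 0 : 1;
    }
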
static void test_query_cpu_model_expansion(const void *data)
{
QTestState *qts;
@ -462,6 +474,7 @@ static void test_query_cpu_model_expansion(const void *data)
assert_has_feature_enabled(qts, "cortex-a57", "aarch64");
sve_tests_default(qts, "max");
pauth_tests_default(qts, "max");
/* Test that features that depend on KVM generate errors without. */
assert_error(qts, "max",

View File

@ -129,6 +129,7 @@ static void adc_qom_set(QTestState *qts, const ADC *adc,
path, name, value);
/* The qom set message returns successfully. */
g_assert_true(qdict_haskey(response, "return"));
qobject_unref(response);
}
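
Editor's note: the added qobject_unref() is the whole memleak fix. qtest_qmp() returns a new reference that the caller owns, even when the response is only inspected. The ownership rule in miniature, with a hypothetical refcounted object standing in for the QDict:

    #include <assert.h>

    typedef struct { int refcnt; } Obj;   /* hypothetical stand-in for QDict */

    static Obj *fake_qmp(void) { static Obj o; o.refcnt = 1; return &o; }
    static void obj_unref(Obj *o) { assert(--o->refcnt >= 0); }

    int main(void)
    {
        Obj *response = fake_qmp();   /* caller owns the new reference */
        /* ... assertions on the response, e.g. qdict_haskey() ... */
        obj_unref(response);          /* the line the fix adds */
        return 0;
    }
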
static void adc_write_input(QTestState *qts, const ADC *adc,