qemu-e2k/tests/tcg/aarch64/Makefile.target
Zhuojia Shen bc6bd20ee3 target/arm: align exposed ID registers with Linux
In CPUID registers exposed to userspace, some registers were missing
and some fields were not exposed.  This patch aligns exposed ID
registers and their fields with what the upstream kernel currently
exposes.

Specifically, the following new ID registers/fields are exposed to
userspace:

ID_AA64PFR1_EL1.BT:       bits 3-0
ID_AA64PFR1_EL1.MTE:      bits 11-8
ID_AA64PFR1_EL1.SME:      bits 27-24

ID_AA64ZFR0_EL1.SVEver:   bits 3-0
ID_AA64ZFR0_EL1.AES:      bits 7-4
ID_AA64ZFR0_EL1.BitPerm:  bits 19-16
ID_AA64ZFR0_EL1.BF16:     bits 23-20
ID_AA64ZFR0_EL1.SHA3:     bits 35-32
ID_AA64ZFR0_EL1.SM4:      bits 43-40
ID_AA64ZFR0_EL1.I8MM:     bits 47-44
ID_AA64ZFR0_EL1.F32MM:    bits 55-52
ID_AA64ZFR0_EL1.F64MM:    bits 59-56

ID_AA64SMFR0_EL1.F32F32:  bit 32
ID_AA64SMFR0_EL1.B16F32:  bit 34
ID_AA64SMFR0_EL1.F16F32:  bit 35
ID_AA64SMFR0_EL1.I8I32:   bits 39-36
ID_AA64SMFR0_EL1.F64F64:  bit 48
ID_AA64SMFR0_EL1.I16I64:  bits 55-52
ID_AA64SMFR0_EL1.FA64:    bit 63

ID_AA64MMFR0_EL1.ECV:     bits 63-60

ID_AA64MMFR1_EL1.AFP:     bits 47-44

ID_AA64MMFR2_EL1.AT:      bits 35-32

ID_AA64ISAR0_EL1.RNDR:    bits 63-60

ID_AA64ISAR1_EL1.FRINTTS: bits 35-32
ID_AA64ISAR1_EL1.BF16:    bits 47-44
ID_AA64ISAR1_EL1.DGH:     bits 51-48
ID_AA64ISAR1_EL1.I8MM:    bits 55-52

ID_AA64ISAR2_EL1.WFxT:    bits 3-0
ID_AA64ISAR2_EL1.RPRES:   bits 7-4
ID_AA64ISAR2_EL1.GPA3:    bits 11-8
ID_AA64ISAR2_EL1.APA3:    bits 15-12

The code is also refactored to use symbolic names for ID register fields
for better readability and maintainability.

The test case in tests/tcg/aarch64/sysregs.c is also updated to match
the intended behavior.

Signed-off-by: Zhuojia Shen <chaosdefinition@hotmail.com>
Message-id: DS7PR12MB6309FB585E10772928F14271ACE79@DS7PR12MB6309.namprd12.prod.outlook.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: use Sn_n_Cn_Cn_n syntax to work with older assemblers
that don't recognize id_aa64isar2_el1 and id_aa64mmfr2_el1]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2023-01-05 14:12:34 +00:00
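
For reference, a minimal sketch (not the actual sysregs.c test code) of how a
userspace program can read one of these ID registers: the MRS instruction
traps to the kernel, which emulates it and returns only the fields it exposes.
The Sn_n_Cn_Cn_n encoding for ID_AA64ISAR2_EL1 matches the assembler note
above; field positions follow the list above.

    /* Hypothetical example, not part of QEMU; assumes a Linux kernel with
     * MRS emulation of the ID register space for EL0. */
    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        uint64_t isar2;

        /* ID_AA64ISAR2_EL1 == S3_0_C0_C6_2 (works with older assemblers) */
        asm volatile("mrs %0, S3_0_C0_C6_2" : "=r"(isar2));

        /* WFxT is bits 3-0, GPA3 is bits 11-8 (see the field list above) */
        printf("ID_AA64ISAR2_EL1 = %016" PRIx64 " (WFxT=%u, GPA3=%u)\n",
               isar2,
               (unsigned)(isar2 & 0xf),
               (unsigned)((isar2 >> 8) & 0xf));
        return 0;
    }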

# -*- Mode: makefile -*-
#
# AArch64 specific tweaks
ARM_SRC=$(SRC_PATH)/tests/tcg/arm
VPATH += $(ARM_SRC)
AARCH64_SRC=$(SRC_PATH)/tests/tcg/aarch64
VPATH += $(AARCH64_SRC)
# Base architecture tests
AARCH64_TESTS=fcvt pcalign-a64
fcvt: LDFLAGS+=-lm
run-fcvt: fcvt
	$(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)")
	$(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref)
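# Probe the cross-compiler: each cc-option the compiler accepts writes a
# CROSS_CC_HAS_* variable into config-cc.mak (via fd 3), which the
# conditionals below test.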
config-cc.mak: Makefile
	$(quiet-@)( \
	  $(call cc-option,-march=armv8.1-a+sve, CROSS_CC_HAS_SVE); \
	  $(call cc-option,-march=armv8.1-a+sve2, CROSS_CC_HAS_SVE2); \
	  $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3); \
	  $(call cc-option,-mbranch-protection=standard, CROSS_CC_HAS_ARMV8_BTI); \
	  $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE); \
	  $(call cc-option,-march=armv9-a+sme, CROSS_CC_HAS_ARMV9_SME)) 3> config-cc.mak
-include config-cc.mak
# Pauth Tests
ifneq ($(CROSS_CC_HAS_ARMV8_3),)
AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5
pauth-%: CFLAGS += -march=armv8.3-a
run-pauth-%: QEMU_OPTS += -cpu max
run-plugin-pauth-%: QEMU_OPTS += -cpu max
endif
# BTI Tests
# bti-1 tests the elf notes, so we require special compiler support.
ifneq ($(CROSS_CC_HAS_ARMV8_BTI),)
AARCH64_TESTS += bti-1 bti-3
bti-1 bti-3: CFLAGS += -mbranch-protection=standard
bti-1 bti-3: LDFLAGS += -nostdlib
endif
# bti-2 tests PROT_BTI, so no special compiler support required.
AARCH64_TESTS += bti-2
# MTE Tests
ifneq ($(CROSS_CC_HAS_ARMV8_MTE),)
AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6 mte-7
mte-%: CFLAGS += -march=armv8.5-a+memtag
endif
ifneq ($(CROSS_CC_HAS_SVE),)
# System Registers Tests
AARCH64_TESTS += sysregs
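# Build sysregs with SME when the compiler supports it; HAS_ARMV9_SME lets
# the test cover the SME-related ID registers as well.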
ifneq ($(CROSS_CC_HAS_ARMV9_SME),)
sysregs: CFLAGS+=-march=armv9-a+sme -DHAS_ARMV9_SME
else
sysregs: CFLAGS+=-march=armv8.1-a+sve
endif
# SVE ioctl test
AARCH64_TESTS += sve-ioctls
sve-ioctls: CFLAGS+=-march=armv8.1-a+sve
# Vector SHA1
sha1-vector: CFLAGS=-O3
sha1-vector: sha1.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
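# Run the vectorised binary and diff its output against the scalar sha1 run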
run-sha1-vector: sha1-vector run-sha1
	$(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<)
	$(call diff-out, sha1-vector, sha1.out)
TESTS += sha1-vector
# Vector versions of sha512 (-O3 triggers vectorisation)
sha512-vector: CFLAGS=-O3
sha512-vector: sha512.c
	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
TESTS += sha512-vector
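# gdbstub tests: drive the guest binaries under GDB via run-test.py scripts;
# only enabled when a suitable gdb binary was found at configure time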
ifneq ($(HAVE_GDB_BIN),)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py
run-gdbstub-sysregs: sysregs
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(HAVE_GDB_BIN) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(AARCH64_SRC)/gdbstub/test-sve.py, \
		basic gdbstub SVE support)
run-gdbstub-sve-ioctls: sve-ioctls
	$(call run-test, $@, $(GDB_SCRIPT) \
		--gdb $(HAVE_GDB_BIN) \
		--qemu $(QEMU) --qargs "$(QEMU_OPTS)" \
		--bin $< --test $(AARCH64_SRC)/gdbstub/test-sve-ioctl.py, \
		basic gdbstub SVE ZLEN support)
EXTRA_RUNS += run-gdbstub-sysregs run-gdbstub-sve-ioctls
endif
endif
ifneq ($(CROSS_CC_HAS_SVE2),)
AARCH64_TESTS += test-826
test-826: CFLAGS+=-march=armv8.1-a+sve2
endif
TESTS += $(AARCH64_TESTS)