arm: suppress aes erratum when forwarding from aes

AES operations are commonly chained, and since the result of one AES
operation is never a 32-bit value, the forwarded result does not need
an additional mitigation instruction.  We handle this common case by
adding patterns that recognize the chained operations.
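
For illustration only (not part of the commit): a typical chained
sequence written with the ACLE intrinsics from arm_neon.h, compiled
with the crypto extension enabled.  The function and parameter names
are ours.

  #include <arm_neon.h>

  /* Two chained AES rounds.  Every value forwarded between the AES
     instructions is a full 128-bit vector, so no erratum mitigation
     is needed between them.  */
  uint8x16_t
  aes_two_rounds (uint8x16_t state, uint8x16_t k0, uint8x16_t k1)
  {
    state = vaesmcq_u8 (vaeseq_u8 (state, k0));  /* aese.8 + aesmc.8 */
    state = vaesmcq_u8 (vaeseq_u8 (state, k1));
    return state;
  }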

gcc/ChangeLog:

	* config/arm/crypto.md (crypto_<CRYPTO_AESMC:crypto_pattern>_protected):
	New pattern.
	(aarch32_crypto_aese_fused_protected): Likewise.
	(aarch32_crypto_aesd_fused_protected): Likewise.
Richard Earnshaw 2021-11-01 13:23:26 +00:00
parent bc13384e19
commit 2078550a00
1 changed file with 50 additions and 0 deletions

@@ -75,6 +75,20 @@
[(set_attr "type" "neon_move_q")]
)

;; An AESMC operation can feed directly into a subsequent AES
;; operation without needing mitigation.
(define_insn "*crypto_<CRYPTO_AESMC:crypto_pattern>_protected"
[(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
(unspec:<crypto_mode>
[(unspec:<crypto_mode>
[(match_operand:<crypto_mode> 1 "register_operand" "w")]
CRYPTO_AESMC)]
UNSPEC_AES_PROTECT))]
"TARGET_CRYPTO && fix_aes_erratum_1742098"
"<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
[(set_attr "type" "<crypto_type>")]
)
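
An illustrative sketch (ours, not from the patch) of the contrast this
pattern captures, using ACLE intrinsics; whether GCC actually inserts
the mitigation copy in the first function depends on how the 32-bit
value reaches the vector register.

  #include <arm_neon.h>

  uint8x16_t
  may_need_protect (uint8x16_t key, uint32_t x)
  {
    /* The input is built from a 32-bit transfer, the case erratum
       1742098 guards against, so a mitigation copy may be inserted
       before the aese.  */
    uint8x16_t state = vreinterpretq_u8_u32 (vdupq_n_u32 (x));
    return vaeseq_u8 (state, key);
  }

  uint8x16_t
  no_protect_needed (uint8x16_t mixed, uint8x16_t key)
  {
    /* vaesmcq_u8 produces a full 128-bit result, so feeding it to
       the next vaeseq_u8 needs no mitigation.  */
    return vaeseq_u8 (vaesmcq_u8 (mixed), key);
  }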

;; When AESE/AESMC fusion is enabled we really want to keep the two together
;; and enforce the register dependency without scheduling or register
;; allocation messing up the order or introducing moves in between.
@@ -95,6 +109,25 @@
(set_attr "length" "8")]
)

;; And similarly when mitigation is enabled, but not needed in this
;; case.
(define_insn "*aarch32_crypto_aese_fused_protected"
[(set (match_operand:V16QI 0 "register_operand" "=w")
(unspec:V16QI
[(unspec:V16QI
[(unspec:V16QI [(xor:V16QI
(match_operand:V16QI 1 "register_operand" "%0")
(match_operand:V16QI 2 "register_operand" "w"))]
UNSPEC_AESE)]
UNSPEC_AESMC)]
UNSPEC_AES_PROTECT))]
"TARGET_CRYPTO && fix_aes_erratum_1742098
&& arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
"aese.8\\t%q0, %q2\;aesmc.8\\t%q0, %q0"
[(set_attr "type" "crypto_aese")
(set_attr "length" "8")]
)

;; When AESD/AESIMC fusion is enabled we really want to keep the two together
;; and enforce the register dependency without scheduling or register
;; allocation messing up the order or introducing moves in between.
@@ -115,6 +148,23 @@
(set_attr "length" "8")]
)

(define_insn "*aarch32_crypto_aesd_fused_protected"
[(set (match_operand:V16QI 0 "register_operand" "=w")
(unspec:V16QI
[(unspec:V16QI
[(unspec:V16QI [(xor:V16QI
(match_operand:V16QI 1 "register_operand" "%0")
(match_operand:V16QI 2 "register_operand" "w"))]
UNSPEC_AESD)]
UNSPEC_AESIMC)]
UNSPEC_AES_PROTECT))]
"TARGET_CRYPTO && fix_aes_erratum_1742098
&& arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
"aesd.8\\t%q0, %q2\;aesimc.8\\t%q0, %q0"
[(set_attr "type" "crypto_aese")
(set_attr "length" "8")]
)
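
Again purely illustrative (our example): the decryption pair this
pattern keeps fused, written as intrinsics.

  #include <arm_neon.h>

  /* One AES decryption round: aesd.8 followed by aesimc.8, fused
     with no mitigation copy in between.  */
  uint8x16_t
  aes_dec_round (uint8x16_t state, uint8x16_t rk)
  {
    return vaesimcq_u8 (vaesdq_u8 (state, rk));
  }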

(define_insn "crypto_<CRYPTO_BINARY:crypto_pattern>"
[(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
(unspec:<crypto_mode>