From 3c9de0a93e5b3560550e0459f018a5e55a9566e5 Mon Sep 17 00:00:00 2001
From: liuhongt
Date: Tue, 9 Apr 2019 14:38:33 +0800
Subject: [PATCH] AVX512FP16: Add scalar fma instructions.

Add vfmadd[132,213,231]sh/vfnmadd[132,213,231]sh/
vfmsub[132,213,231]sh/vfnmsub[132,213,231]sh.

gcc/ChangeLog:

	* config/i386/avx512fp16intrin.h (_mm_fmadd_sh): New intrinsic.
	(_mm_mask_fmadd_sh): Likewise.
	(_mm_mask3_fmadd_sh): Likewise.
	(_mm_maskz_fmadd_sh): Likewise.
	(_mm_fmadd_round_sh): Likewise.
	(_mm_mask_fmadd_round_sh): Likewise.
	(_mm_mask3_fmadd_round_sh): Likewise.
	(_mm_maskz_fmadd_round_sh): Likewise.
	(_mm_fnmadd_sh): Likewise.
	(_mm_mask_fnmadd_sh): Likewise.
	(_mm_mask3_fnmadd_sh): Likewise.
	(_mm_maskz_fnmadd_sh): Likewise.
	(_mm_fnmadd_round_sh): Likewise.
	(_mm_mask_fnmadd_round_sh): Likewise.
	(_mm_mask3_fnmadd_round_sh): Likewise.
	(_mm_maskz_fnmadd_round_sh): Likewise.
	(_mm_fmsub_sh): Likewise.
	(_mm_mask_fmsub_sh): Likewise.
	(_mm_mask3_fmsub_sh): Likewise.
	(_mm_maskz_fmsub_sh): Likewise.
	(_mm_fmsub_round_sh): Likewise.
	(_mm_mask_fmsub_round_sh): Likewise.
	(_mm_mask3_fmsub_round_sh): Likewise.
	(_mm_maskz_fmsub_round_sh): Likewise.
	(_mm_fnmsub_sh): Likewise.
	(_mm_mask_fnmsub_sh): Likewise.
	(_mm_mask3_fnmsub_sh): Likewise.
	(_mm_maskz_fnmsub_sh): Likewise.
	(_mm_fnmsub_round_sh): Likewise.
	(_mm_mask_fnmsub_round_sh): Likewise.
	(_mm_mask3_fnmsub_round_sh): Likewise.
	(_mm_maskz_fnmsub_round_sh): Likewise.
	* config/i386/i386-builtin-types.def
	(V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT): New builtin type.
	* config/i386/i386-builtin.def: Add new builtins.
	* config/i386/i386-expand.c: Handle new builtin type.
	* config/i386/sse.md (fmai_vmfmadd_<mode>): Adjust to support FP16.
	(fmai_vmfmsub_<mode>): Ditto.
	(fmai_vmfnmadd_<mode>): Ditto.
	(fmai_vmfnmsub_<mode>): Ditto.
	(*fmai_fmadd_<mode>): Ditto.
	(*fmai_fmsub_<mode>): Ditto.
	(*fmai_fnmadd_<mode>): Ditto.
	(*fmai_fnmsub_<mode>): Ditto.
	(avx512f_vmfmadd_<mode>_mask): Ditto.
	(avx512f_vmfmadd_<mode>_mask3): Ditto.
	(avx512f_vmfmadd_<mode>_maskz): Ditto.
	(avx512f_vmfmadd_<mode>_maskz_1): Ditto.
	(*avx512f_vmfmsub_<mode>_mask): Ditto.
	(avx512f_vmfmsub_<mode>_mask3): Ditto.
	(*avx512f_vmfmsub_<mode>_maskz_1): Ditto.
	(*avx512f_vmfnmsub_<mode>_mask): Ditto.
	(*avx512f_vmfnmsub_<mode>_mask3): Ditto.
	(*avx512f_vmfnmsub_<mode>_maskz_1): Ditto.
	(*avx512f_vmfnmadd_<mode>_mask): Renamed to ...
	(avx512f_vmfnmadd_<mode>_mask) ... this, and adjust to support FP16.
	(avx512f_vmfnmadd_<mode>_mask3): Ditto.
	(avx512f_vmfnmadd_<mode>_maskz_1): Ditto.
	(avx512f_vmfnmadd_<mode>_maskz): New expander.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add test for new builtins.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/sse-14.c: Add test for new intrinsics.
	* gcc.target/i386/sse-22.c: Ditto.
---
 gcc/config/i386/avx512fp16intrin.h     | 412 +++++++++++++++++++++++++
 gcc/config/i386/i386-builtin-types.def |   1 +
 gcc/config/i386/i386-builtin.def       |   7 +
 gcc/config/i386/i386-expand.c          |   1 +
 gcc/config/i386/sse.md                 | 340 ++++++++++----------
 gcc/testsuite/gcc.target/i386/avx-1.c  |  12 +
 gcc/testsuite/gcc.target/i386/sse-13.c |  12 +
 gcc/testsuite/gcc.target/i386/sse-14.c |  16 +
 gcc/testsuite/gcc.target/i386/sse-22.c |  16 +
 gcc/testsuite/gcc.target/i386/sse-23.c |  12 +
 10 files changed, 666 insertions(+), 163 deletions(-)

diff --git a/gcc/config/i386/avx512fp16intrin.h b/gcc/config/i386/avx512fp16intrin.h
index 9ccbc463a47..47146967e40 100644
--- a/gcc/config/i386/avx512fp16intrin.h
+++ b/gcc/config/i386/avx512fp16intrin.h
@@ -5703,6 +5703,418 @@ _mm512_maskz_fnmsub_round_ph (__mmask32 __U, __m512h __A, __m512h __B,
 #endif /* __OPTIMIZE__ */
+/* Intrinsics vfmadd[132,213,231]sh.
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fmadd_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (-1), (R))) +#define _mm_mask_fmadd_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (U), (R))) +#define _mm_mask3_fmadd_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask3 ((A), (B), (C), (U), (R))) +#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfnmadd[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fnmadd_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (-1), (R))) +#define _mm_mask_fnmadd_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (U), (R))) +#define _mm_mask3_fnmadd_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((A), (B), (C), (U), (R))) +#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmsub[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fmsub_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (-1), (R))) +#define _mm_mask_fmsub_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (U), (R))) +#define _mm_mask3_fmsub_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), (B), (C), (U), (R))) +#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), -(C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfnmsub[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + -(__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + -(__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fnmsub_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (-1), (R))) +#define _mm_mask_fnmsub_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (U), (R))) +#define _mm_mask3_fnmsub_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), -(B), (C), (U), (R))) +#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), -(B), -(C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + #ifdef __DISABLE_AVX512FP16__ #undef __DISABLE_AVX512FP16__ #pragma GCC pop_options diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def index 7fd4286ef26..5eae4d0376a 100644 --- a/gcc/config/i386/i386-builtin-types.def +++ b/gcc/config/i386/i386-builtin-types.def @@ -1342,6 +1342,7 @@ DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT) DEF_FUNCTION_TYPE (V8HF, V8HF, INT, V8HF, UQI) DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI) 
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI) +DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, INT) DEF_FUNCTION_TYPE (UQI, V8HF, V8HF, INT, UQI, INT) DEF_FUNCTION_TYPE (V8DI, V8HF, V8DI, UQI, INT) DEF_FUNCTION_TYPE (V8DF, V8HF, V8DF, UQI, INT) diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def index 56d23b02658..5950d5e5773 100644 --- a/gcc/config/i386/i386-builtin.def +++ b/gcc/config/i386/i386-builtin.def @@ -3194,6 +3194,13 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fmsub_v32hf_maskz_round BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmsub_v32hf_mask_round, "__builtin_ia32_vfnmsubph512_mask", IX86_BUILTIN_VFNMSUBPH512_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT) BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmsub_v32hf_mask3_round, "__builtin_ia32_vfnmsubph512_mask3", IX86_BUILTIN_VFNMSUBPH512_MASK3, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT) BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512bw_fnmsub_v32hf_maskz_round, "__builtin_ia32_vfnmsubph512_maskz", IX86_BUILTIN_VFNMSUBPH512_MASKZ, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmadd_v8hf_mask_round, "__builtin_ia32_vfmaddsh3_mask", IX86_BUILTIN_VFMADDSH3_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmadd_v8hf_mask3_round, "__builtin_ia32_vfmaddsh3_mask3", IX86_BUILTIN_VFMADDSH3_MASK3, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmadd_v8hf_maskz_round, "__builtin_ia32_vfmaddsh3_maskz", IX86_BUILTIN_VFMADDSH3_MASKZ, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfnmadd_v8hf_mask_round, "__builtin_ia32_vfnmaddsh3_mask", IX86_BUILTIN_VFNMADDSH3_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfnmadd_v8hf_mask3_round, "__builtin_ia32_vfnmaddsh3_mask3", IX86_BUILTIN_VFNMADDSH3_MASK3, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfnmadd_v8hf_maskz_round, "__builtin_ia32_vfnmaddsh3_maskz", IX86_BUILTIN_VFNMADDSH3_MASKZ, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) +BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512f_vmfmsub_v8hf_mask3_round, "__builtin_ia32_vfmsubsh3_mask3", IX86_BUILTIN_VFMSUBSH3_MASK3, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) BDESC_END (ROUND_ARGS, MULTI_ARG) diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c index 3ec032b9994..c88cb14bd72 100644 --- a/gcc/config/i386/i386-expand.c +++ b/gcc/config/i386/i386-expand.c @@ -10738,6 +10738,7 @@ ix86_expand_round_builtin (const struct builtin_description *d, case V8HF_FTYPE_V8DI_V8HF_UQI_INT: case V8HF_FTYPE_V8DF_V8HF_UQI_INT: case V16HF_FTYPE_V16SF_V16HF_UHI_INT: + case V8HF_FTYPE_V8HF_V8HF_V8HF_INT: nargs = 4; break; case V4SF_FTYPE_V4SF_V4SF_INT_INT: diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md index bcded09d6f9..0016c027f0c 100644 --- a/gcc/config/i386/sse.md +++ b/gcc/config/i386/sse.md @@ -5343,60 +5343,60 @@ ;; high-order elements from the destination register. 
(define_expand "fmai_vmfmadd_" - [(set (match_operand:VF_128 0 "register_operand") - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand") - (match_operand:VF_128 2 "") - (match_operand:VF_128 3 "")) + [(set (match_operand:VFH_128 0 "register_operand") + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand") + (match_operand:VFH_128 2 "") + (match_operand:VFH_128 3 "")) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_expand "fmai_vmfmsub_" - [(set (match_operand:VF_128 0 "register_operand") - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand") - (match_operand:VF_128 2 "") - (neg:VF_128 - (match_operand:VF_128 3 ""))) + [(set (match_operand:VFH_128 0 "register_operand") + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand") + (match_operand:VFH_128 2 "") + (neg:VFH_128 + (match_operand:VFH_128 3 ""))) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_expand "fmai_vmfnmadd_" - [(set (match_operand:VF_128 0 "register_operand") - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "")) - (match_operand:VF_128 1 "register_operand") - (match_operand:VF_128 3 "")) + [(set (match_operand:VFH_128 0 "register_operand") + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "")) + (match_operand:VFH_128 1 "register_operand") + (match_operand:VFH_128 3 "")) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_expand "fmai_vmfnmsub_" - [(set (match_operand:VF_128 0 "register_operand") - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "")) - (match_operand:VF_128 1 "register_operand") - (neg:VF_128 - (match_operand:VF_128 3 ""))) + [(set (match_operand:VFH_128 0 "register_operand") + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "")) + (match_operand:VFH_128 1 "register_operand") + (neg:VFH_128 + (match_operand:VFH_128 3 ""))) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_insn "*fmai_fmadd_" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 2 "" ", v") - (match_operand:VF_128 3 "" "v,")) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 2 "" ", v") + (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" @@ -5407,13 +5407,13 @@ (set_attr "mode" "")]) (define_insn "*fmai_fmsub_" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 2 "" ",v") - (neg:VF_128 - (match_operand:VF_128 3 "" "v,"))) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 2 "" ",v") + (neg:VFH_128 + (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" @@ -5424,13 +5424,13 @@ (set_attr "mode" "")]) (define_insn "*fmai_fnmadd_" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" ",v")) - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 3 "" "v,")) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + 
(match_operand:VFH_128 2 "" ",v")) + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" @@ -5441,14 +5441,14 @@ (set_attr "mode" "")]) (define_insn "*fmai_fnmsub_" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" ",v")) - (match_operand:VF_128 1 "register_operand" "0,0") - (neg:VF_128 - (match_operand:VF_128 3 "" "v,"))) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" ",v")) + (match_operand:VFH_128 1 "register_operand" "0,0") + (neg:VFH_128 + (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" @@ -5459,13 +5459,13 @@ (set_attr "mode" "")]) (define_insn "avx512f_vmfmadd__mask" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 2 "" ",v") - (match_operand:VF_128 3 "" "v,")) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 2 "" ",v") + (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) @@ -5478,13 +5478,13 @@ (set_attr "mode" "")]) (define_insn "avx512f_vmfmadd__mask3" - [(set (match_operand:VF_128 0 "register_operand" "=v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "" "%v") - (match_operand:VF_128 2 "" "") - (match_operand:VF_128 3 "register_operand" "0")) + [(set (match_operand:VFH_128 0 "register_operand" "=v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "" "%v") + (match_operand:VFH_128 2 "" "") + (match_operand:VFH_128 3 "register_operand" "0")) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) @@ -5495,10 +5495,10 @@ (set_attr "mode" "")]) (define_expand "avx512f_vmfmadd__maskz" - [(match_operand:VF_128 0 "register_operand") - (match_operand:VF_128 1 "") - (match_operand:VF_128 2 "") - (match_operand:VF_128 3 "") + [(match_operand:VFH_128 0 "register_operand") + (match_operand:VFH_128 1 "") + (match_operand:VFH_128 2 "") + (match_operand:VFH_128 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512F" { @@ -5509,14 +5509,14 @@ }) (define_insn "avx512f_vmfmadd__maskz_1" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 2 "" ",v") - (match_operand:VF_128 3 "" "v,")) - (match_operand:VF_128 4 "const0_operand" "C,C") + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 2 "" ",v") + (match_operand:VFH_128 3 "" "v,")) + (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] @@ -5528,14 +5528,14 @@ (set_attr "mode" "")]) (define_insn "*avx512f_vmfmsub__mask" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 2 "" ",v") - (neg:VF_128 - 
(match_operand:VF_128 3 "" "v,"))) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 2 "" ",v") + (neg:VFH_128 + (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) @@ -5548,14 +5548,14 @@ (set_attr "mode" "")]) (define_insn "avx512f_vmfmsub__mask3" - [(set (match_operand:VF_128 0 "register_operand" "=v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "" "%v") - (match_operand:VF_128 2 "" "") - (neg:VF_128 - (match_operand:VF_128 3 "register_operand" "0"))) + [(set (match_operand:VFH_128 0 "register_operand" "=v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "" "%v") + (match_operand:VFH_128 2 "" "") + (neg:VFH_128 + (match_operand:VFH_128 3 "register_operand" "0"))) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) @@ -5566,15 +5566,15 @@ (set_attr "mode" "")]) (define_insn "*avx512f_vmfmsub__maskz_1" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 2 "" ",v") - (neg:VF_128 - (match_operand:VF_128 3 "" "v,"))) - (match_operand:VF_128 4 "const0_operand" "C,C") + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 2 "" ",v") + (neg:VFH_128 + (match_operand:VFH_128 3 "" "v,"))) + (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] @@ -5585,15 +5585,15 @@ [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) -(define_insn "*avx512f_vmfnmadd__mask" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" ",v")) - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 3 "" "v,")) +(define_insn "avx512f_vmfnmadd__mask" + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" ",v")) + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) @@ -5605,15 +5605,15 @@ [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) -(define_insn "*avx512f_vmfnmadd__mask3" - [(set (match_operand:VF_128 0 "register_operand" "=v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" "")) - (match_operand:VF_128 1 "" "%v") - (match_operand:VF_128 3 "register_operand" "0")) +(define_insn "avx512f_vmfnmadd__mask3" + [(set (match_operand:VFH_128 0 "register_operand" "=v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" "")) + (match_operand:VFH_128 1 "" "%v") + (match_operand:VFH_128 3 "register_operand" "0")) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) @@ -5623,16 +5623,30 @@ [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) -(define_insn "*avx512f_vmfnmadd__maskz_1" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 
2 "" ",v")) - (match_operand:VF_128 1 "register_operand" "0,0") - (match_operand:VF_128 3 "" "v,")) - (match_operand:VF_128 4 "const0_operand" "C,C") +(define_expand "avx512f_vmfnmadd__maskz" + [(match_operand:VFH_128 0 "register_operand") + (match_operand:VFH_128 1 "") + (match_operand:VFH_128 2 "") + (match_operand:VFH_128 3 "") + (match_operand:QI 4 "register_operand")] + "TARGET_AVX512F" +{ + emit_insn (gen_avx512f_vmfnmadd__maskz_1 ( + operands[0], operands[1], operands[2], operands[3], + CONST0_RTX (mode), operands[4])); + DONE; +}) + +(define_insn "avx512f_vmfnmadd__maskz_1" + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" ",v")) + (match_operand:VFH_128 1 "register_operand" "0,0") + (match_operand:VFH_128 3 "" "v,")) + (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] @@ -5644,15 +5658,15 @@ (set_attr "mode" "")]) (define_insn "*avx512f_vmfnmsub__mask" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" ",v")) - (match_operand:VF_128 1 "register_operand" "0,0") - (neg:VF_128 - (match_operand:VF_128 3 "" "v,"))) + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" ",v")) + (match_operand:VFH_128 1 "register_operand" "0,0") + (neg:VFH_128 + (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) @@ -5665,15 +5679,15 @@ (set_attr "mode" "")]) (define_insn "*avx512f_vmfnmsub__mask3" - [(set (match_operand:VF_128 0 "register_operand" "=v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" "")) - (match_operand:VF_128 1 "" "%v") - (neg:VF_128 - (match_operand:VF_128 3 "register_operand" "0"))) + [(set (match_operand:VFH_128 0 "register_operand" "=v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" "")) + (match_operand:VFH_128 1 "" "%v") + (neg:VFH_128 + (match_operand:VFH_128 3 "register_operand" "0"))) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) @@ -5684,16 +5698,16 @@ (set_attr "mode" "")]) (define_insn "*avx512f_vmfnmsub__maskz_1" - [(set (match_operand:VF_128 0 "register_operand" "=v,v") - (vec_merge:VF_128 - (vec_merge:VF_128 - (fma:VF_128 - (neg:VF_128 - (match_operand:VF_128 2 "" ",v")) - (match_operand:VF_128 1 "register_operand" "0,0") - (neg:VF_128 - (match_operand:VF_128 3 "" "v,"))) - (match_operand:VF_128 4 "const0_operand" "C,C") + [(set (match_operand:VFH_128 0 "register_operand" "=v,v") + (vec_merge:VFH_128 + (vec_merge:VFH_128 + (fma:VFH_128 + (neg:VFH_128 + (match_operand:VFH_128 2 "" ",v")) + (match_operand:VFH_128 1 "register_operand" "0,0") + (neg:VFH_128 + (match_operand:VFH_128 3 "" "v,"))) + (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c index 8e2428a2476..a04c678dc37 100644 --- a/gcc/testsuite/gcc.target/i386/avx-1.c +++ b/gcc/testsuite/gcc.target/i386/avx-1.c @@ -775,6 +775,18 @@ #define __builtin_ia32_vfnmsubph512_mask(A, B, C, D, E) __builtin_ia32_vfnmsubph512_mask(A, B, C, D, 8) #define 
__builtin_ia32_vfnmsubph512_mask3(A, B, C, D, E) __builtin_ia32_vfnmsubph512_mask3(A, B, C, D, 8) #define __builtin_ia32_vfnmsubph512_maskz(A, B, C, D, E) __builtin_ia32_vfnmsubph512_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_mask(A, B, C, D, E) __builtin_ia32_vfmaddsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_mask3(A, B, C, D, E) __builtin_ia32_vfmaddsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_maskz(A, B, C, D, E) __builtin_ia32_vfmaddsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_mask(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_mask3(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_maskz(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_mask(A, B, C, D, E) __builtin_ia32_vfmsubsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_mask3(A, B, C, D, E) __builtin_ia32_vfmsubsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_maskz(A, B, C, D, E) __builtin_ia32_vfmsubsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_mask(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_mask3(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_maskz(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_maskz(A, B, C, D, 8) /* avx512fp16vlintrin.h */ #define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D) diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c index 7f4b2a3b285..e9a838edf70 100644 --- a/gcc/testsuite/gcc.target/i386/sse-13.c +++ b/gcc/testsuite/gcc.target/i386/sse-13.c @@ -792,6 +792,18 @@ #define __builtin_ia32_vfnmsubph512_mask(A, B, C, D, E) __builtin_ia32_vfnmsubph512_mask(A, B, C, D, 8) #define __builtin_ia32_vfnmsubph512_mask3(A, B, C, D, E) __builtin_ia32_vfnmsubph512_mask3(A, B, C, D, 8) #define __builtin_ia32_vfnmsubph512_maskz(A, B, C, D, E) __builtin_ia32_vfnmsubph512_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_mask(A, B, C, D, E) __builtin_ia32_vfmaddsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_mask3(A, B, C, D, E) __builtin_ia32_vfmaddsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_maskz(A, B, C, D, E) __builtin_ia32_vfmaddsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_mask(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_mask3(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_maskz(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_mask(A, B, C, D, E) __builtin_ia32_vfmsubsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_mask3(A, B, C, D, E) __builtin_ia32_vfmsubsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_maskz(A, B, C, D, E) __builtin_ia32_vfmsubsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_mask(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_mask3(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_maskz(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_maskz(A, B, C, D, 8) /* avx512fp16vlintrin.h */ #define __builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D) diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c index 9151e50afd2..01ac4e04173 
100644 --- a/gcc/testsuite/gcc.target/i386/sse-14.c +++ b/gcc/testsuite/gcc.target/i386/sse-14.c @@ -842,6 +842,10 @@ test_3 (_mm512_fmadd_round_ph, __m512h, __m512h, __m512h, __m512h, 9) test_3 (_mm512_fnmadd_round_ph, __m512h, __m512h, __m512h, __m512h, 9) test_3 (_mm512_fmsub_round_ph, __m512h, __m512h, __m512h, __m512h, 9) test_3 (_mm512_fnmsub_round_ph, __m512h, __m512h, __m512h, __m512h, 9) +test_3 (_mm_fmadd_round_sh, __m128h, __m128h, __m128h, __m128h, 9) +test_3 (_mm_fnmadd_round_sh, __m128h, __m128h, __m128h, __m128h, 9) +test_3 (_mm_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9) +test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9) test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8) test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8) test_3x (_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8) @@ -892,6 +896,18 @@ test_4 (_mm512_maskz_fmsub_round_ph, __m512h, __mmask32, __m512h, __m512h, __m51 test_4 (_mm512_mask_fnmsub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 9) test_4 (_mm512_mask3_fnmsub_round_ph, __m512h, __m512h, __m512h, __m512h, __mmask32, 9) test_4 (_mm512_maskz_fnmsub_round_ph, __m512h, __mmask32, __m512h, __m512h, __m512h, 9) +test_4 (_mm_mask_fmadd_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fmadd_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fmadd_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) +test_4 (_mm_mask_fnmadd_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fnmadd_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fnmadd_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) +test_4 (_mm_mask_fmsub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fmsub_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) +test_4 (_mm_mask_fnmsub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fnmsub_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8) test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8) test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1) diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c index 892b6334ae2..79e3f35ab86 100644 --- a/gcc/testsuite/gcc.target/i386/sse-22.c +++ b/gcc/testsuite/gcc.target/i386/sse-22.c @@ -945,6 +945,10 @@ test_3 (_mm512_fmadd_round_ph, __m512h, __m512h, __m512h, __m512h, 9) test_3 (_mm512_fnmadd_round_ph, __m512h, __m512h, __m512h, __m512h, 9) test_3 (_mm512_fmsub_round_ph, __m512h, __m512h, __m512h, __m512h, 9) test_3 (_mm512_fnmsub_round_ph, __m512h, __m512h, __m512h, __m512h, 9) +test_3 (_mm_fmadd_round_sh, __m128h, __m128h, __m128h, __m128h, 9) +test_3 (_mm_fnmadd_round_sh, __m128h, __m128h, __m128h, __m128h, 9) +test_3 (_mm_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9) +test_3 (_mm_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, 9) test_3x (_mm512_mask_cmp_round_ph_mask, __mmask32, __mmask32, __m512h, __m512h, 1, 8) test_3x (_mm_mask_cmp_round_sh_mask, __mmask8, __mmask8, __m128h, __m128h, 1, 8) test_3x 
(_mm512_mask_reduce_round_ph, __m512h, __m512h, __mmask32, __m512h, 123, 8) @@ -994,6 +998,18 @@ test_4 (_mm512_maskz_fmsub_round_ph, __m512h, __mmask32, __m512h, __m512h, __m51 test_4 (_mm512_mask_fnmsub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 9) test_4 (_mm512_mask3_fnmsub_round_ph, __m512h, __m512h, __m512h, __m512h, __mmask32, 9) test_4 (_mm512_maskz_fnmsub_round_ph, __m512h, __mmask32, __m512h, __m512h, __m512h, 9) +test_4 (_mm_mask_fmadd_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fmadd_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fmadd_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) +test_4 (_mm_mask_fnmadd_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fnmadd_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fnmadd_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) +test_4 (_mm_mask_fmsub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fmsub_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fmsub_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) +test_4 (_mm_mask_fnmsub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 9) +test_4 (_mm_mask3_fnmsub_round_sh, __m128h, __m128h, __m128h, __m128h, __mmask8, 9) +test_4 (_mm_maskz_fnmsub_round_sh, __m128h, __mmask8, __m128h, __m128h, __m128h, 9) test_4x (_mm_mask_reduce_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8) test_4x (_mm_mask_roundscale_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 123, 8) test_4x (_mm_mask_getmant_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 1, 1) diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c index 2eb5a649fe4..4be2c1e1628 100644 --- a/gcc/testsuite/gcc.target/i386/sse-23.c +++ b/gcc/testsuite/gcc.target/i386/sse-23.c @@ -793,6 +793,18 @@ #define __builtin_ia32_vfnmsubph512_mask(A, B, C, D, E) __builtin_ia32_vfnmsubph512_mask(A, B, C, D, 8) #define __builtin_ia32_vfnmsubph512_mask3(A, B, C, D, E) __builtin_ia32_vfnmsubph512_mask3(A, B, C, D, 8) #define __builtin_ia32_vfnmsubph512_maskz(A, B, C, D, E) __builtin_ia32_vfnmsubph512_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_mask(A, B, C, D, E) __builtin_ia32_vfmaddsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_mask3(A, B, C, D, E) __builtin_ia32_vfmaddsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfmaddsh3_maskz(A, B, C, D, E) __builtin_ia32_vfmaddsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_mask(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_mask3(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfnmaddsh3_maskz(A, B, C, D, E) __builtin_ia32_vfnmaddsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_mask(A, B, C, D, E) __builtin_ia32_vfmsubsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_mask3(A, B, C, D, E) __builtin_ia32_vfmsubsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfmsubsh3_maskz(A, B, C, D, E) __builtin_ia32_vfmsubsh3_maskz(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_mask(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_mask(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_mask3(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_mask3(A, B, C, D, 8) +#define __builtin_ia32_vfnmsubsh3_maskz(A, B, C, D, E) __builtin_ia32_vfnmsubsh3_maskz(A, B, C, D, 8) /* avx512fp16vlintrin.h */ #define 
__builtin_ia32_cmpph128_mask(A, B, C, D) __builtin_ia32_cmpph128_mask(A, B, 1, D)
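
Usage sketch for the new intrinsics (not part of the patch): the scalar FP16
fma intrinsics mirror the existing _ss/_sd forms, operating only on the low
half-precision element while the remaining elements are passed through, with
merge/zero masking and optional explicit rounding.  A minimal example,
assuming the header is reachable through <immintrin.h> and the file is
compiled with -mavx512fp16 (the function name and values are illustrative
only):

    #include <immintrin.h>

    __m128h
    fma_demo (__m128h w, __m128h a, __m128h b, __mmask8 m)
    {
      /* Low element: w[0] * a[0] + b[0]; other elements taken from w.  */
      w = _mm_fmadd_sh (w, a, b);

      /* Merge masking: low element becomes -(w[0] * a[0]) + b[0] when
         bit 0 of m is set, otherwise it is kept from w.  */
      w = _mm_mask_fnmadd_sh (w, m, a, b);

      /* Zero masking plus explicit rounding: low element is
         w[0] * a[0] - b[0], or 0.0 when bit 0 of m is clear.  */
      return _mm_maskz_fmsub_round_sh (m, w, a, b,
                                       _MM_FROUND_TO_NEAREST_INT
                                       | _MM_FROUND_NO_EXC);
    }

When __OPTIMIZE__ is not defined, the *_round_sh forms are provided as macros
rather than inline functions, so the rounding argument must be a compile-time
constant in either case.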