Vector sinf for x86_64 and tests.

Here is implementation of vectorized sinf containing SSE, AVX,
AVX2 and AVX512 versions according to Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

    * sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
    * sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration for sinf.
    * sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
    * sysdeps/x86_64/fpu/Versions: New versions added.
    * sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
    * sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added
    build of SSE, AVX2 and AVX512 IFUNC versions.
    * sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S: New file.
    * sysdeps/x86_64/fpu/svml_s_sinf16_core.S: New file.
    * sysdeps/x86_64/fpu/svml_s_sinf4_core.S: New file.
    * sysdeps/x86_64/fpu/svml_s_sinf8_core.S: New file.
    * sysdeps/x86_64/fpu/svml_s_sinf8_core_avx.S: New file.
    * sysdeps/x86_64/fpu/svml_s_sinf_data.S: New file.
    * sysdeps/x86_64/fpu/svml_s_sinf_data.h: New file.
    * sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Vector sinf tests.
    * sysdeps/x86_64/fpu/test-float-vlen16.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
    * NEWS: Mention addition of x86_64 vector sinf.
This commit is contained in:
Andrew Senkevich 2015-06-15 15:06:53 +03:00
parent bf1435783d
commit 2a8c2c7b33
28 changed files with 2377 additions and 2 deletions

View File

@@ -1,3 +1,34 @@
2015-06-15 Andrew Senkevich <andrew.senkevich@intel.com>
* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
* sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration for sinf.
* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
* sysdeps/x86_64/fpu/Versions: New versions added.
* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added
build of SSE, AVX2 and AVX512 IFUNC versions.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core.S: New file.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S: New file.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core.S: New file.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S: New file.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core.S: New file.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S: New file.
* sysdeps/x86_64/fpu/svml_s_sinf16_core.S: New file.
* sysdeps/x86_64/fpu/svml_s_sinf4_core.S: New file.
* sysdeps/x86_64/fpu/svml_s_sinf8_core.S: New file.
* sysdeps/x86_64/fpu/svml_s_sinf8_core_avx.S: New file.
* sysdeps/x86_64/fpu/svml_s_sinf_data.S: New file.
* sysdeps/x86_64/fpu/svml_s_sinf_data.h: New file.
* sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Vector sinf tests.
* sysdeps/x86_64/fpu/test-float-vlen16.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
* NEWS: Mention addition of x86_64 vector sinf.
2015-06-14 Joseph Myers <joseph@codesourcery.com>
* conform/list-header-symbols.pl (%extra_syms): Add in6addr_any

2
NEWS
View File

@@ -53,7 +53,7 @@ Version 2.22
condition in some applications.
* Added vector math library named libmvec with the following vectorized x86_64
implementations: cos, cosf, sin.
implementations: cos, cosf, sin, sinf.
The library can be disabled with --disable-mathvec. Use of the functions is
enabled with -fopenmp -ffast-math starting from -O1 for GCC version >= 4.9.0.
The library is linked in as needed when using -lm (no need to specify -lmvec

View File

@ -3,12 +3,16 @@ GLIBC_2.22
_ZGVbN2v_cos F
_ZGVbN2v_sin F
_ZGVbN4v_cosf F
_ZGVbN4v_sinf F
_ZGVcN4v_cos F
_ZGVcN4v_sin F
_ZGVcN8v_cosf F
_ZGVcN8v_sinf F
_ZGVdN4v_cos F
_ZGVdN4v_sin F
_ZGVdN8v_cosf F
_ZGVdN8v_sinf F
_ZGVeN16v_cosf F
_ZGVeN16v_sinf F
_ZGVeN8v_cos F
_ZGVeN8v_sin F

View File

@ -34,5 +34,7 @@
# define __DECL_SIMD_cosf __DECL_SIMD_x86_64
# undef __DECL_SIMD_sin
# define __DECL_SIMD_sin __DECL_SIMD_x86_64
# undef __DECL_SIMD_sinf
# define __DECL_SIMD_sinf __DECL_SIMD_x86_64
# endif
#endif

View File

@ -5,6 +5,8 @@ libmvec-support += svml_d_cos2_core svml_d_cos4_core_avx \
svml_d_sin4_core svml_d_sin8_core svml_d_sin_data \
svml_s_cosf4_core svml_s_cosf8_core_avx \
svml_s_cosf8_core svml_s_cosf16_core svml_s_cosf_data \
svml_s_sinf4_core svml_s_sinf8_core_avx \
svml_s_sinf8_core svml_s_sinf16_core svml_s_sinf_data \
init-arch
endif

View File

@ -3,5 +3,6 @@ libmvec {
_ZGVbN2v_cos; _ZGVcN4v_cos; _ZGVdN4v_cos; _ZGVeN8v_cos;
_ZGVbN2v_sin; _ZGVcN4v_sin; _ZGVdN4v_sin; _ZGVeN8v_sin;
_ZGVbN4v_cosf; _ZGVcN8v_cosf; _ZGVdN8v_cosf; _ZGVeN16v_cosf;
_ZGVbN4v_sinf; _ZGVcN8v_sinf; _ZGVdN8v_sinf; _ZGVeN16v_sinf;
}
}

View File

@ -1929,17 +1929,25 @@ idouble: 1
ildouble: 3
ldouble: 3
Function: "sin_vlen16":
float: 1
Function: "sin_vlen2":
double: 2
Function: "sin_vlen4":
double: 2
float: 1
Function: "sin_vlen4_avx2":
double: 2
Function: "sin_vlen8":
double: 2
float: 1
Function: "sin_vlen8_avx2":
float: 1
Function: "sincos":
ildouble: 1

View File

@ -57,5 +57,6 @@ libmvec-sysdep_routines += svml_d_cos2_core_sse4 svml_d_cos4_core_avx2 \
svml_d_cos8_core_avx512 svml_d_sin2_core_sse4 \
svml_d_sin4_core_avx2 svml_d_sin8_core_avx512 \
svml_s_cosf4_core_sse4 svml_s_cosf8_core_avx2 \
svml_s_cosf16_core_avx512
svml_s_cosf16_core_avx512 svml_s_sinf4_core_sse4 \
svml_s_sinf8_core_avx2 svml_s_sinf16_core_avx512
endif

View File

@@ -0,0 +1,39 @@
/* Multiple versions of vectorized sinf.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include <init-arch.h>
.text
ENTRY (_ZGVeN16v_sinf)
.type _ZGVeN16v_sinf, @gnu_indirect_function
/* IFUNC resolver: returns (in %rax) the best available implementation.
   Preference order: SKX (AVX512DQ) > KNL (AVX512F) > AVX2 wrapper.
   BUGFIX: forward references to GAS numeric local labels must use the
   "Nf" form; bare "jne 1" / "jnz 3" are absolute-address operands,
   not label references (the sibling SSE/AVX2 resolvers correctly use
   "1f"/"2f").  */
cmpl $0, KIND_OFFSET+__cpu_features(%rip)
jne 1f
call __init_cpu_features
1: leaq _ZGVeN16v_sinf_skx(%rip), %rax
testl $bit_AVX512DQ_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512DQ_Usable(%rip)
jnz 3f
2: leaq _ZGVeN16v_sinf_knl(%rip), %rax
testl $bit_AVX512F_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512F_Usable(%rip)
jnz 3f
leaq _ZGVeN16v_sinf_avx2_wrapper(%rip), %rax
3: ret
END (_ZGVeN16v_sinf)
#define _ZGVeN16v_sinf _ZGVeN16v_sinf_avx2_wrapper
#include "../svml_s_sinf16_core.S"

View File

@ -0,0 +1,479 @@
/* Function sinf vectorized with AVX-512. KNL and SKX versions.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_sinf_data.h"
#include "svml_s_wrapper_impl.h"
.text
/* 16-lane single-precision vector sin, KNL (AVX512F) variant.
   In:  %zmm0 = 16 float arguments.   Out: %zmm0 = 16 float results.
   Falls back to the AVX2 wrapper when AVX-512 assembler support is absent.  */
ENTRY(_ZGVeN16v_sinf_knl)
#ifndef HAVE_AVX512_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
#else
/*
ALGORITHM DESCRIPTION:
1) Range reduction to [-Pi/2; +Pi/2] interval
a) Grab sign from source argument and save it.
b) Remove sign using AND operation
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
f) Change destination sign if source sign is negative
using XOR operation.
g) Subtract "Right Shifter" value
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
/* Build a frame, align the stack to 64 bytes and reserve a 1280-byte
   area used by the slow path to spill input/result vectors and
   registers live across the scalar sinf calls.  */
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
subq $1280, %rsp
movq __svml_ssin_data@GOTPCREL(%rip), %rax
/* Check for large and special values */
movl $-1, %edx
vmovups __sAbsMask(%rax), %zmm4
vmovups __sInvPI(%rax), %zmm1
/* b) Remove sign using AND operation */
vpandd %zmm4, %zmm0, %zmm12
vmovups __sPI1_FMA(%rax), %zmm2
vmovups __sA9(%rax), %zmm7
/*
f) Change destination sign if source sign is negative
using XOR operation.
*/
vpandnd %zmm0, %zmm4, %zmm11
/*
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3;
*/
vmovaps %zmm12, %zmm3
/*
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
*/
vfmadd213ps __sRShifter(%rax), %zmm12, %zmm1
/* Predicate 22 is NLE_UQ: k1 flags lanes where |x| is NOT <= the
   range-reduction threshold, i.e. large arguments and NaNs.  */
vcmpps $22, __sRangeReductionVal(%rax), %zmm12, %k1
/* Materialize all-ones in flagged lanes (zeroing-masked broadcast of -1).  */
vpbroadcastd %edx, %zmm13{%k1}{z}
/* g) Subtract "Right Shifter" value */
vsubps __sRShifter(%rax), %zmm1, %zmm5
/*
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
*/
vpslld $31, %zmm1, %zmm6
vptestmd %zmm13, %zmm13, %k0
vfnmadd231ps %zmm5, %zmm2, %zmm3
/* %ecx = bitmask of special lanes; nonzero selects the slow path.  */
kmovw %k0, %ecx
vfnmadd231ps __sPI2_FMA(%rax), %zmm5, %zmm3
vfnmadd132ps __sPI3_FMA(%rax), %zmm3, %zmm5
/*
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
*/
vmulps %zmm5, %zmm5, %zmm8
vpxord %zmm6, %zmm5, %zmm9
vfmadd213ps __sA7(%rax), %zmm8, %zmm7
vfmadd213ps __sA5(%rax), %zmm8, %zmm7
vfmadd213ps __sA3(%rax), %zmm8, %zmm7
vmulps %zmm8, %zmm7, %zmm10
vfmadd213ps %zmm9, %zmm9, %zmm10
/*
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
vpxord %zmm11, %zmm10, %zmm1
testl %ecx, %ecx
jne .LBL_1_3
.LBL_1_2:
cfi_remember_state
/* Fast-path exit: move result to %zmm0 and unwind the frame.  */
vmovaps %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.LBL_1_3:
cfi_restore_state
/* Slow path: spill inputs (1152) and fast-path results (1216), then
   recompute the flagged lanes with scalar sinf.  */
vmovups %zmm0, 1152(%rsp)
vmovups %zmm1, 1216(%rsp)
/* NOTE(review): flags here still come from the testl above; this path
   was entered via jne, so this je appears never taken — confirm
   (looks like a compiler-generated artifact).  */
je .LBL_1_2
xorb %dl, %dl
kmovw %k4, 1048(%rsp)
xorl %eax, %eax
kmovw %k5, 1040(%rsp)
kmovw %k6, 1032(%rsp)
kmovw %k7, 1024(%rsp)
/* Preserve %k4-%k7 and %zmm16-%zmm31 across the sinf@PLT calls.  */
vmovups %zmm16, 960(%rsp)
vmovups %zmm17, 896(%rsp)
vmovups %zmm18, 832(%rsp)
vmovups %zmm19, 768(%rsp)
vmovups %zmm20, 704(%rsp)
vmovups %zmm21, 640(%rsp)
vmovups %zmm22, 576(%rsp)
vmovups %zmm23, 512(%rsp)
vmovups %zmm24, 448(%rsp)
vmovups %zmm25, 384(%rsp)
vmovups %zmm26, 320(%rsp)
vmovups %zmm27, 256(%rsp)
vmovups %zmm28, 192(%rsp)
vmovups %zmm29, 128(%rsp)
vmovups %zmm30, 64(%rsp)
vmovups %zmm31, (%rsp)
movq %rsi, 1064(%rsp)
movq %rdi, 1056(%rsp)
movq %r12, 1096(%rsp)
cfi_offset_rel_rsp (12, 1096)
movb %dl, %r12b
movq %r13, 1088(%rsp)
cfi_offset_rel_rsp (13, 1088)
movl %ecx, %r13d
movq %r14, 1080(%rsp)
cfi_offset_rel_rsp (14, 1080)
movl %eax, %r14d
movq %r15, 1072(%rsp)
cfi_offset_rel_rsp (15, 1072)
cfi_remember_state
/* Lane-pair loop: %r13d = special-lane bitmask, %r14d = lane index,
   %r12b = pair counter (16 pairs cover the 16 lanes, 8 bytes each).  */
.LBL_1_6:
btl %r14d, %r13d
jc .LBL_1_12
.LBL_1_7:
lea 1(%r14), %esi
btl %esi, %r13d
jc .LBL_1_10
.LBL_1_8:
addb $1, %r12b
addl $2, %r14d
cmpb $16, %r12b
jb .LBL_1_6
/* All lanes done: restore saved register state and the result vector.  */
kmovw 1048(%rsp), %k4
movq 1064(%rsp), %rsi
kmovw 1040(%rsp), %k5
movq 1056(%rsp), %rdi
kmovw 1032(%rsp), %k6
movq 1096(%rsp), %r12
cfi_restore (%r12)
movq 1088(%rsp), %r13
cfi_restore (%r13)
kmovw 1024(%rsp), %k7
vmovups 960(%rsp), %zmm16
vmovups 896(%rsp), %zmm17
vmovups 832(%rsp), %zmm18
vmovups 768(%rsp), %zmm19
vmovups 704(%rsp), %zmm20
vmovups 640(%rsp), %zmm21
vmovups 576(%rsp), %zmm22
vmovups 512(%rsp), %zmm23
vmovups 448(%rsp), %zmm24
vmovups 384(%rsp), %zmm25
vmovups 320(%rsp), %zmm26
vmovups 256(%rsp), %zmm27
vmovups 192(%rsp), %zmm28
vmovups 128(%rsp), %zmm29
vmovups 64(%rsp), %zmm30
vmovups (%rsp), %zmm31
movq 1080(%rsp), %r14
cfi_restore (%r14)
movq 1072(%rsp), %r15
cfi_restore (%r15)
vmovups 1216(%rsp), %zmm1
jmp .LBL_1_2
.LBL_1_10:
cfi_restore_state
/* Odd lane of the pair: input at 1152+4 + 8*pair, result at 1216+4 + 8*pair.  */
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
call sinf@PLT
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
/* Even lane of the pair.  */
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
call sinf@PLT
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_1_7
#endif
END(_ZGVeN16v_sinf_knl)
/* 16-lane single-precision vector sin, SKX (AVX512DQ) variant.
   Same algorithm as the KNL variant; uses VL/DQ forms (vandps/vxorps on
   zmm) and a merge-masked trick to build the special-lane mask.  */
ENTRY (_ZGVeN16v_sinf_skx)
#ifndef HAVE_AVX512_ASM_SUPPORT
WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
#else
/*
ALGORITHM DESCRIPTION:
1) Range reduction to [-Pi/2; +Pi/2] interval
a) Grab sign from source argument and save it.
b) Remove sign using AND operation
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
f) Change destination sign if source sign is negative
using XOR operation.
g) Subtract "Right Shifter" value
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
/* Frame setup: 64-byte stack alignment, 1280-byte spill area for the
   scalar-fallback path.  */
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
subq $1280, %rsp
movq __svml_ssin_data@GOTPCREL(%rip), %rax
/* Check for large and special values */
/* %zmm14 preloaded with all-ones; in-range lanes are cleared below,
   leaving ones only in the special lanes.  */
vmovups .L_2il0floatpacket.11(%rip), %zmm14
vmovups __sAbsMask(%rax), %zmm5
vmovups __sInvPI(%rax), %zmm1
vmovups __sRShifter(%rax), %zmm2
vmovups __sPI1_FMA(%rax), %zmm3
vmovups __sA9(%rax), %zmm8
/* b) Remove sign using AND operation */
vandps %zmm5, %zmm0, %zmm13
/*
f) Change destination sign if source sign is negative
using XOR operation.
*/
vandnps %zmm0, %zmm5, %zmm12
/*
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
*/
vfmadd213ps %zmm2, %zmm13, %zmm1
/* Predicate 18 is LE_OQ: k1 flags the in-range lanes (|x| <= threshold).  */
vcmpps $18, __sRangeReductionVal(%rax), %zmm13, %k1
/*
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
*/
vpslld $31, %zmm1, %zmm7
/* g) Subtract "Right Shifter" value */
vsubps %zmm2, %zmm1, %zmm6
/*
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3;
*/
vmovaps %zmm13, %zmm4
vfnmadd231ps %zmm6, %zmm3, %zmm4
vfnmadd231ps __sPI2_FMA(%rax), %zmm6, %zmm4
vfnmadd132ps __sPI3_FMA(%rax), %zmm4, %zmm6
/*
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
*/
vmulps %zmm6, %zmm6, %zmm9
vxorps %zmm7, %zmm6, %zmm10
vfmadd213ps __sA7(%rax), %zmm9, %zmm8
vfmadd213ps __sA5(%rax), %zmm9, %zmm8
vfmadd213ps __sA3(%rax), %zmm9, %zmm8
vmulps %zmm9, %zmm8, %zmm11
vfmadd213ps %zmm10, %zmm10, %zmm11
/*
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
vxorps %zmm12, %zmm11, %zmm1
/* andn(x,x) == 0 merge-masked under k1: zero out in-range lanes of
   %zmm14, leaving all-ones only where the argument is special.  */
vpandnd %zmm13, %zmm13, %zmm14{%k1}
vptestmd %zmm14, %zmm14, %k0
kmovw %k0, %ecx
testl %ecx, %ecx
jne .LBL_2_3
.LBL_2_2:
cfi_remember_state
/* Fast-path exit.  */
vmovaps %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.LBL_2_3:
cfi_restore_state
/* Slow path: spill inputs (1152) and results (1216), fix up special
   lanes with scalar sinf.  */
vmovups %zmm0, 1152(%rsp)
vmovups %zmm1, 1216(%rsp)
/* NOTE(review): entered via jne above, so this je appears never taken
   — confirm (likely a compiler-output artifact).  */
je .LBL_2_2
xorb %dl, %dl
xorl %eax, %eax
/* Preserve %k4-%k7 and %zmm16-%zmm31 across the sinf@PLT calls.  */
kmovw %k4, 1048(%rsp)
kmovw %k5, 1040(%rsp)
kmovw %k6, 1032(%rsp)
kmovw %k7, 1024(%rsp)
vmovups %zmm16, 960(%rsp)
vmovups %zmm17, 896(%rsp)
vmovups %zmm18, 832(%rsp)
vmovups %zmm19, 768(%rsp)
vmovups %zmm20, 704(%rsp)
vmovups %zmm21, 640(%rsp)
vmovups %zmm22, 576(%rsp)
vmovups %zmm23, 512(%rsp)
vmovups %zmm24, 448(%rsp)
vmovups %zmm25, 384(%rsp)
vmovups %zmm26, 320(%rsp)
vmovups %zmm27, 256(%rsp)
vmovups %zmm28, 192(%rsp)
vmovups %zmm29, 128(%rsp)
vmovups %zmm30, 64(%rsp)
vmovups %zmm31, (%rsp)
movq %rsi, 1064(%rsp)
movq %rdi, 1056(%rsp)
movq %r12, 1096(%rsp)
cfi_offset_rel_rsp (12, 1096)
movb %dl, %r12b
movq %r13, 1088(%rsp)
cfi_offset_rel_rsp (13, 1088)
movl %ecx, %r13d
movq %r14, 1080(%rsp)
cfi_offset_rel_rsp (14, 1080)
movl %eax, %r14d
movq %r15, 1072(%rsp)
cfi_offset_rel_rsp (15, 1072)
cfi_remember_state
/* Lane-pair loop: %r13d = special-lane bitmask, %r14d = lane index,
   %r12b = pair counter.  */
.LBL_2_6:
btl %r14d, %r13d
jc .LBL_2_12
.LBL_2_7:
lea 1(%r14), %esi
btl %esi, %r13d
jc .LBL_2_10
.LBL_2_8:
incb %r12b
addl $2, %r14d
cmpb $16, %r12b
jb .LBL_2_6
/* Restore saved state and the fixed-up result vector.  */
kmovw 1048(%rsp), %k4
kmovw 1040(%rsp), %k5
kmovw 1032(%rsp), %k6
kmovw 1024(%rsp), %k7
vmovups 960(%rsp), %zmm16
vmovups 896(%rsp), %zmm17
vmovups 832(%rsp), %zmm18
vmovups 768(%rsp), %zmm19
vmovups 704(%rsp), %zmm20
vmovups 640(%rsp), %zmm21
vmovups 576(%rsp), %zmm22
vmovups 512(%rsp), %zmm23
vmovups 448(%rsp), %zmm24
vmovups 384(%rsp), %zmm25
vmovups 320(%rsp), %zmm26
vmovups 256(%rsp), %zmm27
vmovups 192(%rsp), %zmm28
vmovups 128(%rsp), %zmm29
vmovups 64(%rsp), %zmm30
vmovups (%rsp), %zmm31
vmovups 1216(%rsp), %zmm1
movq 1064(%rsp), %rsi
movq 1056(%rsp), %rdi
movq 1096(%rsp), %r12
cfi_restore (%r12)
movq 1088(%rsp), %r13
cfi_restore (%r13)
movq 1080(%rsp), %r14
cfi_restore (%r14)
movq 1072(%rsp), %r15
cfi_restore (%r15)
jmp .LBL_2_2
.LBL_2_10:
cfi_restore_state
/* Odd lane of the pair.  NOTE(review): the vmovss is duplicated around
   vzeroupper — redundant but harmless; confirm against upstream.  */
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
call sinf@PLT
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_2_8
.LBL_2_12:
/* Even lane of the pair (same duplicated-vmovss pattern).  */
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
call sinf@PLT
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_2_7
#endif
END (_ZGVeN16v_sinf_skx)
.section .rodata, "a"
/* 16 x 32-bit all-ones: initial "every lane special" mask consumed by
   the merge-masked vpandnd in the SKX path above.  */
.L_2il0floatpacket.11:
.long 0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff
.type .L_2il0floatpacket.11,@object

View File

@ -0,0 +1,38 @@
/* Multiple versions of vectorized sinf.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include <init-arch.h>
.text
ENTRY (_ZGVbN4v_sinf)
.type _ZGVbN4v_sinf, @gnu_indirect_function
/* IFUNC resolver for the 4-lane sinf: pick the SSE4.1 kernel when the
   CPU supports it, otherwise the generic SSE2 kernel.  Strategy: load
   the fallback first, then overwrite on a feature hit.  */
cmpl $0, KIND_OFFSET+__cpu_features(%rip)
jne 1f
call __init_cpu_features
1: leaq _ZGVbN4v_sinf_sse2(%rip), %rax
testl $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip)
jz 2f
leaq _ZGVbN4v_sinf_sse4(%rip), %rax
2: ret
END (_ZGVbN4v_sinf)
libmvec_hidden_def (_ZGVbN4v_sinf)
#define _ZGVbN4v_sinf _ZGVbN4v_sinf_sse2
#include "../svml_s_sinf4_core.S"

View File

@ -0,0 +1,224 @@
/* Function sinf vectorized with SSE4.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_sinf_data.h"
.text
/* 4-lane single-precision vector sin, SSE4 variant.
   In:  %xmm0 = 4 float arguments.   Out: %xmm0 = 4 float results.
   Uses a 4-part Cody-Waite style subtraction of Y*PI (no FMA on SSE).  */
ENTRY(_ZGVbN4v_sinf_sse4)
/*
ALGORITHM DESCRIPTION:
1) Range reduction to [-Pi/2; +Pi/2] interval
a) Grab sign from source argument and save it.
b) Remove sign using AND operation
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
f) Change destination sign if source sign is negative
using XOR operation.
g) Subtract "Right Shifter" value
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
/* Frame setup: 64-byte stack alignment, 320-byte spill area for the
   scalar-fallback path.  */
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
subq $320, %rsp
/* Keep the original argument in %xmm5 for the slow path.  */
movaps %xmm0, %xmm5
movq __svml_ssin_data@GOTPCREL(%rip), %rax
movups __sAbsMask(%rax), %xmm2
/* b) Remove sign using AND operation */
movaps %xmm2, %xmm4
/*
f) Change destination sign if source sign is negative
using XOR operation.
*/
andnps %xmm5, %xmm2
movups __sInvPI(%rax), %xmm1
andps %xmm5, %xmm4
/* c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value */
mulps %xmm4, %xmm1
/* h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4 */
movaps %xmm4, %xmm0
/* Check for large and special values */
cmpnleps __sRangeReductionVal(%rax), %xmm4
movups __sRShifter(%rax), %xmm6
movups __sPI1(%rax), %xmm7
addps %xmm6, %xmm1
/* %ecx = 4-bit mask of large/NaN lanes needing the scalar fallback.  */
movmskps %xmm4, %ecx
/* e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position */
movaps %xmm1, %xmm3
/* g) Subtract "Right Shifter" value */
subps %xmm6, %xmm1
mulps %xmm1, %xmm7
pslld $31, %xmm3
movups __sPI2(%rax), %xmm6
subps %xmm7, %xmm0
mulps %xmm1, %xmm6
movups __sPI3(%rax), %xmm7
subps %xmm6, %xmm0
mulps %xmm1, %xmm7
movups __sPI4(%rax), %xmm6
subps %xmm7, %xmm0
mulps %xmm6, %xmm1
subps %xmm1, %xmm0
/* 2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ...... */
movaps %xmm0, %xmm1
mulps %xmm0, %xmm1
xorps %xmm3, %xmm0
movups __sA9(%rax), %xmm3
mulps %xmm1, %xmm3
addps __sA7(%rax), %xmm3
mulps %xmm1, %xmm3
addps __sA5(%rax), %xmm3
mulps %xmm1, %xmm3
addps __sA3(%rax), %xmm3
mulps %xmm3, %xmm1
mulps %xmm0, %xmm1
addps %xmm1, %xmm0
/* 3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S ); */
xorps %xmm2, %xmm0
testl %ecx, %ecx
jne .LBL_1_3
.LBL_1_2:
cfi_remember_state
/* Fast-path exit: result already in %xmm0.  */
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.LBL_1_3:
cfi_restore_state
/* Slow path: spill inputs (192) and results (256), fix up the flagged
   lanes with scalar sinf.  */
movups %xmm5, 192(%rsp)
movups %xmm0, 256(%rsp)
/* NOTE(review): entered via jne above, so this je appears never taken
   — confirm (likely a compiler-output artifact).  */
je .LBL_1_2
xorb %dl, %dl
xorl %eax, %eax
/* Preserve %xmm8-%xmm15 across the sinf@PLT calls.  */
movups %xmm8, 112(%rsp)
movups %xmm9, 96(%rsp)
movups %xmm10, 80(%rsp)
movups %xmm11, 64(%rsp)
movups %xmm12, 48(%rsp)
movups %xmm13, 32(%rsp)
movups %xmm14, 16(%rsp)
movups %xmm15, (%rsp)
movq %rsi, 136(%rsp)
movq %rdi, 128(%rsp)
movq %r12, 168(%rsp)
cfi_offset_rel_rsp (12, 168)
movb %dl, %r12b
movq %r13, 160(%rsp)
cfi_offset_rel_rsp (13, 160)
movl %ecx, %r13d
movq %r14, 152(%rsp)
cfi_offset_rel_rsp (14, 152)
movl %eax, %r14d
movq %r15, 144(%rsp)
cfi_offset_rel_rsp (15, 144)
cfi_remember_state
/* Lane-pair loop: %r13d = special-lane bitmask (only bits 0-3 can be
   set here), %r14d = lane index, %r12b = pair counter.  NOTE(review):
   the counter runs to 16 pairs as in the wider variants; the extra
   iterations are no-ops since the mask has at most 4 bits — confirm.  */
.LBL_1_6:
btl %r14d, %r13d
jc .LBL_1_12
.LBL_1_7:
lea 1(%r14), %esi
btl %esi, %r13d
jc .LBL_1_10
.LBL_1_8:
incb %r12b
addl $2, %r14d
cmpb $16, %r12b
jb .LBL_1_6
/* Restore saved state and the fixed-up result vector.  */
movups 112(%rsp), %xmm8
movups 96(%rsp), %xmm9
movups 80(%rsp), %xmm10
movups 64(%rsp), %xmm11
movups 48(%rsp), %xmm12
movups 32(%rsp), %xmm13
movups 16(%rsp), %xmm14
movups (%rsp), %xmm15
movq 136(%rsp), %rsi
movq 128(%rsp), %rdi
movq 168(%rsp), %r12
cfi_restore (%r12)
movq 160(%rsp), %r13
cfi_restore (%r13)
movq 152(%rsp), %r14
cfi_restore (%r14)
movq 144(%rsp), %r15
cfi_restore (%r15)
movups 256(%rsp), %xmm0
jmp .LBL_1_2
.LBL_1_10:
cfi_restore_state
/* Odd lane of the pair: input at 192+4 + 8*pair, result at 256+4 + 8*pair.  */
movzbl %r12b, %r15d
movss 196(%rsp,%r15,8), %xmm0
call sinf@PLT
movss %xmm0, 260(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
/* Even lane of the pair.  */
movzbl %r12b, %r15d
movss 192(%rsp,%r15,8), %xmm0
call sinf@PLT
movss %xmm0, 256(%rsp,%r15,8)
jmp .LBL_1_7
END(_ZGVbN4v_sinf_sse4)

View File

@ -0,0 +1,38 @@
/* Multiple versions of vectorized sinf, vector length is 8.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include <init-arch.h>
.text
ENTRY (_ZGVdN8v_sinf)
.type _ZGVdN8v_sinf, @gnu_indirect_function
/* IFUNC resolver for the 8-lane sinf: pick the AVX2 kernel when usable,
   otherwise the SSE wrapper.  Strategy: load the fallback first, then
   overwrite on a feature hit.  */
cmpl $0, KIND_OFFSET+__cpu_features(%rip)
jne 1f
call __init_cpu_features
1: leaq _ZGVdN8v_sinf_sse_wrapper(%rip), %rax
testl $bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip)
jz 2f
leaq _ZGVdN8v_sinf_avx2(%rip), %rax
2: ret
END (_ZGVdN8v_sinf)
libmvec_hidden_def (_ZGVdN8v_sinf)
#define _ZGVdN8v_sinf _ZGVdN8v_sinf_sse_wrapper
#include "../svml_s_sinf8_core.S"

View File

@ -0,0 +1,219 @@
/* Function sinf vectorized with AVX2.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_sinf_data.h"
.text
/* 8-lane single-precision vector sin, AVX2 variant.
   In:  %ymm0 = 8 float arguments.   Out: %ymm0 = 8 float results.
   Uses FMA for the 3-part Y*PI subtraction.  */
ENTRY(_ZGVdN8v_sinf_avx2)
/*
ALGORITHM DESCRIPTION:
1) Range reduction to [-Pi/2; +Pi/2] interval
a) Grab sign from source argument and save it.
b) Remove sign using AND operation
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
f) Change destination sign if source sign is negative
using XOR operation.
g) Subtract "Right Shifter" value
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
/* Frame setup: 64-byte stack alignment, 448-byte spill area for the
   scalar-fallback path.  */
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
subq $448, %rsp
movq __svml_ssin_data@GOTPCREL(%rip), %rax
/* Keep the original argument in %ymm5 for the slow path.  */
vmovdqa %ymm0, %ymm5
vmovups __sAbsMask(%rax), %ymm3
vmovups __sInvPI(%rax), %ymm7
vmovups __sRShifter(%rax), %ymm0
vmovups __sPI1_FMA(%rax), %ymm1
/* b) Remove sign using AND operation */
vandps %ymm3, %ymm5, %ymm4
/*
c) Getting octant Y by 1/Pi multiplication
d) Add "Right Shifter" value
*/
vfmadd213ps %ymm0, %ymm4, %ymm7
/* g) Subtract "Right Shifter" value */
vsubps %ymm0, %ymm7, %ymm2
/*
e) Treat obtained value as integer for destination sign setting.
Shift first bit of this value to the last (sign) position
*/
vpslld $31, %ymm7, %ymm6
/*
h) Subtract Y*PI from X argument, where PI divided to 4 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3;
*/
vmovdqa %ymm4, %ymm0
vfnmadd231ps %ymm2, %ymm1, %ymm0
/* Check for large and special values */
vcmpnle_uqps __sRangeReductionVal(%rax), %ymm4, %ymm4
vfnmadd231ps __sPI2_FMA(%rax), %ymm2, %ymm0
vfnmadd132ps __sPI3_FMA(%rax), %ymm0, %ymm2
/*
2) Polynomial (minimax for sin within [-Pi/2; +Pi/2] interval)
a) Calculate X^2 = X * X
b) Calculate polynomial:
R = X + X * X^2 * (A3 + x^2 * (A5 + ......
*/
vmulps %ymm2, %ymm2, %ymm1
/*
f) Change destination sign if source sign is negative
using XOR operation.
*/
vandnps %ymm5, %ymm3, %ymm0
vxorps %ymm6, %ymm2, %ymm3
vmovups __sA9(%rax), %ymm2
vfmadd213ps __sA7(%rax), %ymm1, %ymm2
vfmadd213ps __sA5(%rax), %ymm1, %ymm2
vfmadd213ps __sA3(%rax), %ymm1, %ymm2
vmulps %ymm1, %ymm2, %ymm6
vfmadd213ps %ymm3, %ymm3, %ymm6
/* %ecx = 8-bit mask of large/NaN lanes needing the scalar fallback.  */
vmovmskps %ymm4, %ecx
/*
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
R = XOR( R, S );
*/
vxorps %ymm0, %ymm6, %ymm0
testl %ecx, %ecx
jne .LBL_1_3
.LBL_1_2:
cfi_remember_state
/* Fast-path exit: result already in %ymm0.  */
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.LBL_1_3:
cfi_restore_state
/* Slow path: spill inputs (320) and results (384), fix up the flagged
   lanes with scalar sinf.  */
vmovups %ymm5, 320(%rsp)
vmovups %ymm0, 384(%rsp)
/* NOTE(review): entered via jne above, so this je appears never taken
   — confirm (likely a compiler-output artifact).  */
je .LBL_1_2
xorb %dl, %dl
xorl %eax, %eax
/* Preserve %ymm8-%ymm15 across the sinf@PLT calls.  */
vmovups %ymm8, 224(%rsp)
vmovups %ymm9, 192(%rsp)
vmovups %ymm10, 160(%rsp)
vmovups %ymm11, 128(%rsp)
vmovups %ymm12, 96(%rsp)
vmovups %ymm13, 64(%rsp)
vmovups %ymm14, 32(%rsp)
vmovups %ymm15, (%rsp)
movq %rsi, 264(%rsp)
movq %rdi, 256(%rsp)
movq %r12, 296(%rsp)
cfi_offset_rel_rsp (12, 296)
movb %dl, %r12b
movq %r13, 288(%rsp)
cfi_offset_rel_rsp (13, 288)
movl %ecx, %r13d
movq %r14, 280(%rsp)
cfi_offset_rel_rsp (14, 280)
movl %eax, %r14d
movq %r15, 272(%rsp)
cfi_offset_rel_rsp (15, 272)
cfi_remember_state
/* Lane-pair loop: %r13d = special-lane bitmask, %r14d = lane index,
   %r12b = pair counter.  */
.LBL_1_6:
btl %r14d, %r13d
jc .LBL_1_12
.LBL_1_7:
lea 1(%r14), %esi
btl %esi, %r13d
jc .LBL_1_10
.LBL_1_8:
incb %r12b
addl $2, %r14d
cmpb $16, %r12b
jb .LBL_1_6
/* Restore saved state and the fixed-up result vector.  */
vmovups 224(%rsp), %ymm8
vmovups 192(%rsp), %ymm9
vmovups 160(%rsp), %ymm10
vmovups 128(%rsp), %ymm11
vmovups 96(%rsp), %ymm12
vmovups 64(%rsp), %ymm13
vmovups 32(%rsp), %ymm14
vmovups (%rsp), %ymm15
vmovups 384(%rsp), %ymm0
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
movq 296(%rsp), %r12
cfi_restore (%r12)
movq 288(%rsp), %r13
cfi_restore (%r13)
movq 280(%rsp), %r14
cfi_restore (%r14)
movq 272(%rsp), %r15
cfi_restore (%r15)
jmp .LBL_1_2
.LBL_1_10:
cfi_restore_state
/* Odd lane of the pair; vzeroupper before the legacy-SSE scalar call
   avoids AVX-to-SSE transition penalties.  */
movzbl %r12b, %r15d
vmovss 324(%rsp,%r15,8), %xmm0
vzeroupper
call sinf@PLT
vmovss %xmm0, 388(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
/* Even lane of the pair.  */
movzbl %r12b, %r15d
vmovss 320(%rsp,%r15,8), %xmm0
vzeroupper
call sinf@PLT
vmovss %xmm0, 384(%rsp,%r15,8)
jmp .LBL_1_7
END(_ZGVdN8v_sinf_avx2)

View File

@ -0,0 +1,25 @@
/* Function sinf vectorized with AVX-512. Wrapper to AVX2 version.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 16-lane single-precision sine, AVX-512 entry point (Vector ABI name
   _ZGVeN16v_sinf: 'e' = AVX-512 ISA class, N16 = 16 lanes, v = vector
   argument).  Implemented via WRAPPER_IMPL_AVX512 on top of the 8-lane
   AVX2 kernel; the macro presumably splits the 512-bit input into two
   256-bit halves and calls _ZGVdN8v_sinf twice — confirm against
   svml_s_wrapper_impl.h.  */
ENTRY (_ZGVeN16v_sinf)
WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
END (_ZGVeN16v_sinf)

View File

@ -0,0 +1,30 @@
/* Function sinf vectorized with SSE2.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 4-lane single-precision sine, baseline SSE2 entry point (Vector ABI
   name _ZGVbN4v_sinf: 'b' = SSE ISA class, N4 = 4 lanes, v = vector
   argument).  WRAPPER_IMPL_SSE2 builds the vector result from the
   scalar sinf — presumably one scalar call per lane; confirm against
   svml_s_wrapper_impl.h.  */
ENTRY (_ZGVbN4v_sinf)
WRAPPER_IMPL_SSE2 sinf
END (_ZGVbN4v_sinf)
/* When this file is not built as part of the multiarch/IFUNC selector,
   this definition is the only implementation, so it also provides the
   library-internal hidden alias (NOTE(review): libmvec_hidden_def is
   defined elsewhere — assumed to emit the hidden alias).  */
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVbN4v_sinf)
#endif

View File

@ -0,0 +1,29 @@
/* Function sinf vectorized with AVX2, wrapper version.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 8-lane single-precision sine, AVX2 entry point (Vector ABI name
   _ZGVdN8v_sinf: 'd' = AVX2 ISA class, N8 = 8 lanes, v = vector
   argument).  Wrapper version: WRAPPER_IMPL_AVX presumably invokes the
   4-lane SSE kernel _ZGVbN4v_sinf on each 128-bit half — confirm
   against svml_s_wrapper_impl.h.  */
ENTRY (_ZGVdN8v_sinf)
WRAPPER_IMPL_AVX _ZGVbN4v_sinf
END (_ZGVdN8v_sinf)
/* Outside a multiarch/IFUNC build this is the sole implementation, so
   also emit the library-internal hidden alias.  */
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVdN8v_sinf)
#endif

View File

@ -0,0 +1,25 @@
/* Function sinf vectorized in AVX ISA as wrapper to SSE4 ISA version.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 8-lane single-precision sine, AVX entry point (Vector ABI name
   _ZGVcN8v_sinf: 'c' = AVX ISA class, N8 = 8 lanes, v = vector
   argument).  Wrapper version: WRAPPER_IMPL_AVX presumably invokes the
   4-lane SSE kernel _ZGVbN4v_sinf on each 128-bit half — confirm
   against svml_s_wrapper_impl.h.
   Note: spacing after ENTRY/END normalized ("ENTRY (") to match every
   sibling wrapper file in this commit; the expansion is identical.  */
ENTRY (_ZGVcN8v_sinf)
WRAPPER_IMPL_AVX _ZGVbN4v_sinf
END (_ZGVcN8v_sinf)

File diff suppressed because it is too large. (Load Diff)

View File

@ -0,0 +1,54 @@
/* Offsets for data table for vector sinf.
Copyright (C) 2014-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#ifndef S_SINF_DATA_H
#define S_SINF_DATA_H
/* Offsets for data table */
/* Byte offsets into the __svml_ssin_data table defined in
   svml_s_sinf_data.S.  __dT is a 4096-byte lookup table at offset 0;
   every field after it is 64 bytes wide (16 replicated 32-bit values,
   emitted by the float_vector macro below), which is why consecutive
   offsets differ by exactly 64.  */
#define __dT 0
#define __sAbsMask 4096
#define __sRangeReductionVal 4160
#define __sRangeVal 4224
#define __sS1 4288
#define __sS2 4352
#define __sC1 4416
#define __sC2 4480
#define __sPI1 4544
#define __sPI2 4608
#define __sPI3 4672
#define __sPI4 4736
#define __sPI1_FMA 4800
#define __sPI2_FMA 4864
#define __sPI3_FMA 4928
#define __sA3 4992
#define __sA5 5056
#define __sA7 5120
#define __sA9 5184
#define __sInvPI 5248
#define __sRShifter 5312
/* Emit one 64-byte table field: VALUE replicated into 16 32-bit lanes
   (wide enough for a full ZMM broadcast-free load).  The .if/.err
   guard aborts assembly unless the field starts exactly OFFSET bytes
   from __svml_ssin_data, keeping the #defines above in lockstep with
   the actual data layout.  */
.macro float_vector offset value
.if .-__svml_ssin_data != \offset
.err
.endif
.rept 16
.long \value
.endr
.endm
#endif

View File

@ -23,3 +23,4 @@
/* 16-lane (AVX-512, __m512) test-wrapper instantiations.
   VECTOR_WRAPPER presumably expands to a shim that feeds scalar test
   inputs through the vector routine lane-by-lane — confirm in the
   shared test wrapper header.  */
#define VEC_TYPE __m512
VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVeN16v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVeN16v_sinf)

View File

@ -19,6 +19,7 @@
#include "test-float-vlen16.h"
/* Enable the vector cosf and sinf test cases for the 16-lane variant.  */
#define TEST_VECTOR_cosf 1
#define TEST_VECTOR_sinf 1
/* Run only on CPUs with AVX-512F support.  */
#define REQUIRE_AVX512F

View File

@ -23,3 +23,4 @@
/* 4-lane (SSE, __m128) test-wrapper instantiations for the baseline
   _ZGVbN4v_* vector routines.  */
#define VEC_TYPE __m128
VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVbN4v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVbN4v_sinf)

View File

@ -19,5 +19,6 @@
#include "test-float-vlen4.h"
/* Enable the vector cosf and sinf test cases for the 4-lane variant,
   then pull in the common libm test driver.  */
#define TEST_VECTOR_cosf 1
#define TEST_VECTOR_sinf 1
#include "libm-test.c"

View File

@ -26,3 +26,4 @@
/* 8-lane (__m256) test-wrapper instantiations for the AVX2
   _ZGVdN8v_* vector routines.  */
#define VEC_TYPE __m256
VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVdN8v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVdN8v_sinf)

View File

@ -22,6 +22,7 @@
/* Distinguish this variant's test names from the plain vlen8 run.  */
#define VEC_SUFF _vlen8_avx2
/* Enable the vector cosf and sinf test cases for the AVX2 variant.  */
#define TEST_VECTOR_cosf 1
#define TEST_VECTOR_sinf 1
/* Run only on CPUs with AVX2 support.  */
#define REQUIRE_AVX2

View File

@ -23,3 +23,4 @@
/* 8-lane (__m256) test-wrapper instantiations for the AVX
   _ZGVcN8v_* vector routines.  */
#define VEC_TYPE __m256
VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVcN8v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVcN8v_sinf)

View File

@ -19,5 +19,6 @@
#include "test-float-vlen8.h"
/* Enable the vector cosf and sinf test cases for the 8-lane AVX
   variant, then pull in the common libm test driver.  */
#define TEST_VECTOR_cosf 1
#define TEST_VECTOR_sinf 1
#include "libm-test.c"