Properly restore AVX registers on x86-64.

tst-audit4 and tst-audit5 fail under the AVX emulator because je was
used where jne is needed. This patch fixes them.
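For context, each branch in this diff guards a conditional ymm restore: after
an audit hook returns, the trampoline compares the current xmm value against
the copy saved before the call, and restores the full 256-bit register only
when the hook left the low 128 bits untouched. With je, the restore was
skipped exactly in that untouched case. A minimal C sketch of the intended
logic (illustrative only, not the glibc sources; the names are hypothetical):

	#include <string.h>

	struct xmm { unsigned char b[16]; };
	struct ymm { unsigned char b[32]; };

	/* Restore one vector register after the audit hook returns.
	   saved_xmm/saved_ymm hold the values spilled before the call.  */
	static void
	restore_vector (const struct xmm *current_xmm,
	                const struct xmm *saved_xmm,
	                const struct ymm *saved_ymm, struct ymm *reg)
	{
	  /* "jne 1f": if the hook changed the xmm part, keep its value.  */
	  if (memcmp (current_xmm, saved_xmm, sizeof *saved_xmm) != 0)
	    return;
	  /* Fall through to the vmovdqu: restore the original ymm.  */
	  memcpy (reg, saved_ymm, sizeof *saved_ymm);
	}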
H.J. Lu authored on 2009-07-29 08:40:54 -07:00; committed by Ulrich Drepper
parent b48a267b8f
commit 09e0389eb1
2 changed files with 14 additions and 10 deletions

ChangeLog

@@ -1,3 +1,7 @@
+2009-07-28  H.J. Lu  <hongjiu.lu@intel.com>
+
+	* sysdeps/x86_64/dl-trampoline.S: Properly restore AVX registers.
+
 2009-07-29  Ulrich Drepper  <drepper@redhat.com>
 
 	* elf/dl-runtime.c (_dl_fixup): Indicate before _dl_lookup_symbol_x

sysdeps/x86_64/dl-trampoline.S

@@ -203,49 +203,49 @@ L(no_avx1):
 	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET)(%rsp), %ymm0

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6

 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7

 L(no_avx2):
@@ -361,13 +361,13 @@ L(no_avx3):
 	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
 	vpmovmskb %xmm2, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0

 1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
 	vpmovmskb %xmm2, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1

 L(no_avx4):
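For reference, the three-instruction equality test feeding each of these
branches (vpcmpeqq, vpmovmskb, cmpl $0xffff) can be mirrored with the
matching intrinsics. A sketch assuming SSE4.1 or AVX support (build with
e.g. -msse4.1); xmm_unchanged is a hypothetical helper name, not glibc code:

	#include <immintrin.h>
	#include <stdbool.h>

	/* vpcmpeqq sets each 64-bit lane to all-ones where the quadwords
	   match; vpmovmskb gathers the byte sign bits into a 16-bit mask,
	   which equals 0xffff only when all 16 bytes are identical.  */
	static bool
	xmm_unchanged (__m128i saved, __m128i current)
	{
	  __m128i eq = _mm_cmpeq_epi64 (saved, current);
	  return _mm_movemask_epi8 (eq) == 0xffff;
	}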