re PR target/7693 (Typo in i386 mmintrin.h header)
	* mmintrin.h (__m64): Typedef it to v2si.
	(_mm_cvtsi32_si64, _mm_cvtsi64_si32, _mm_sll_pi16,
	_mm_sll_pi32, _mm_sll_pi64, _mm_slli_pi64, _mm_sra_pi16,
	_mm_sra_pi32, _mm_srl_pi16, _mm_srl_pi32, _mm_srl_pi64,
	_mm_srli_pi64, _mm_and_si64, _mm_andnot_si64,
	_mm_or_si64, _mm_xor_si64): Add necessary casts.
	* xmmintrin.h (_mm_setzero_si64): Likewise.

	PR target/7693
	Patch by Shawn Wagner
	* mmintrin.h: Replace pi64 by si64.

From-SVN: r58308
commit 51d68d5ad4
parent baee3f4fdf
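As a quick illustration of the user-visible effect (not part of the commit itself), the sketch below exercises the corrected si64 shift names and the __m64 conversions this patch adjusts. It assumes an x86 target, the patched <mmintrin.h>, and compilation with -mmmx; the constants and the round-trip check are arbitrary.

#include <mmintrin.h>

int
main (void)
{
  __m64 v;
  int lo;

  v = _mm_cvtsi32_si64 (0x1234);  /* load 32 bits into the low half of an __m64 */
  v = _mm_slli_si64 (v, 8);       /* spelled _mm_slli_pi64 before this fix */
  v = _mm_srli_si64 (v, 8);       /* spelled _mm_srli_pi64 before this fix */
  lo = _mm_cvtsi64_si32 (v);      /* extract the low 32 bits again */
  _mm_empty ();                   /* clear MMX state before any x87 use */
  return lo == 0x1234 ? 0 : 1;    /* the round trip should be lossless */
}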
@@ -1,3 +1,17 @@
+Sat Oct 19 15:49:14 CEST 2002  Jan Hubicka  <jh@suse.cz>
+
+	* mmintrin.h (__m64): Typedef it to v2si.
+	(_mm_cvtsi32_si64, _mm_cvtsi64_si32, _mm_sll_pi16,
+	_mm_sll_pi32, _mm_sll_pi64, _mm_slli_pi64, _mm_sra_pi16,
+	_mm_sra_pi32, _mm_srl_pi16, _mm_srl_pi32, _mm_srl_pi64,
+	_mm_srli_pi64, _mm_and_si64, _mm_andnot_si64,
+	_mm_or_si64, _mm_xor_si64): Add necessary casts.
+	* xmmintrin.h (_mm_setzero_si64): Likewise.
+
+	PR target/7693
+	Patch by Shawn Wagner
+	* mmintrin.h: Replace pi64 by si64.
+
 2002-10-18  Zack Weinberg  <zack@codesourcery.com>
 
 	* decl.c (start_decl): Point users of the old initialized-
@@ -31,7 +31,7 @@
 #define _MMINTRIN_H_INCLUDED
 
 /* The data type intended for user use. */
-typedef unsigned long long __m64 __attribute__ ((__aligned__ (8)));
+typedef int __m64 __attribute__ ((__mode__ (__V2SI__)));
 
 /* Internal data types for implementing the intrinsics. */
 typedef int __v2si __attribute__ ((__mode__ (__V2SI__)));
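The typedef change above is what drives the rest of the diff: once __m64 is a V2SI vector type rather than unsigned long long, GCC no longer converts it implicitly to or from scalar integers, so each wrapper has to cast its operands to long long for the 64-bit builtins and cast the result back to __m64. A minimal sketch of that pattern, using a hypothetical wrapper name and assuming the GCC 3.x-era builtin signature shown in this patch:

#include <mmintrin.h>

/* Hypothetical helper, mirroring the cast pattern the real wrappers below use.  */
static __inline __m64
shift_left_si64 (__m64 __m, __m64 __count)
{
  /* Operands go to the DImode builtin as plain 64-bit scalars, and the
     scalar result is cast back to the vector-typed __m64.  */
  return (__m64) __builtin_ia32_psllq ((long long) __m, (long long) __count);
}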
@@ -49,14 +49,16 @@ _mm_empty (void)
 static __inline __m64
 _mm_cvtsi32_si64 (int __i)
 {
-  return (unsigned int) __i;
+  long long __tmp = (unsigned int)__i;
+  return (__m64) __tmp;
 }
 
 /* Convert the lower 32 bits of the __m64 object into an integer. */
 static __inline int
 _mm_cvtsi64_si32 (__m64 __i)
 {
-  return __i;
+  long long __tmp = (long long)__i;
+  return __tmp;
 }
 
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
@@ -269,7 +271,7 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
 static __inline __m64
 _mm_sll_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
+  return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -282,7 +284,7 @@ _mm_slli_pi16 (__m64 __m, int __count)
 static __inline __m64
 _mm_sll_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
+  return (__m64) __builtin_ia32_pslld ((__v2si)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -293,22 +295,22 @@ _mm_slli_pi32 (__m64 __m, int __count)
 
 /* Shift the 64-bit value in M left by COUNT. */
 static __inline __m64
-_mm_sll_pi64 (__m64 __m, __m64 __count)
+_mm_sll_si64 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psllq (__m, __count);
+  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
 }
 
 static __inline __m64
-_mm_slli_pi64 (__m64 __m, int __count)
+_mm_slli_si64 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psllq (__m, __count);
+  return (__m64) __builtin_ia32_psllq ((long long)__m, (long long)__count);
 }
 
 /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
 static __inline __m64
 _mm_sra_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
+  return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -321,7 +323,7 @@ _mm_srai_pi16 (__m64 __m, int __count)
 static __inline __m64
 _mm_sra_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
+  return (__m64) __builtin_ia32_psrad ((__v2si)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -334,7 +336,7 @@ _mm_srai_pi32 (__m64 __m, int __count)
 static __inline __m64
 _mm_srl_pi16 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
+  return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -347,7 +349,7 @@ _mm_srli_pi16 (__m64 __m, int __count)
 static __inline __m64
 _mm_srl_pi32 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
+  return (__m64) __builtin_ia32_psrld ((__v2si)__m, (long long)__count);
 }
 
 static __inline __m64
@@ -358,22 +360,22 @@ _mm_srli_pi32 (__m64 __m, int __count)
 
 /* Shift the 64-bit value in M left by COUNT; shift in zeros. */
 static __inline __m64
-_mm_srl_pi64 (__m64 __m, __m64 __count)
+_mm_srl_si64 (__m64 __m, __m64 __count)
 {
-  return (__m64) __builtin_ia32_psrlq (__m, __count);
+  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
 }
 
 static __inline __m64
-_mm_srli_pi64 (__m64 __m, int __count)
+_mm_srli_si64 (__m64 __m, int __count)
 {
-  return (__m64) __builtin_ia32_psrlq (__m, __count);
+  return (__m64) __builtin_ia32_psrlq ((long long)__m, (long long)__count);
 }
 
 /* Bit-wise AND the 64-bit values in M1 and M2. */
 static __inline __m64
 _mm_and_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_pand (__m1, __m2);
+  return (__m64) __builtin_ia32_pand ((long long)__m1, (long long)__m2);
 }
 
 /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
@@ -381,21 +383,21 @@ _mm_and_si64 (__m64 __m1, __m64 __m2)
 static __inline __m64
 _mm_andnot_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_pandn (__m1, __m2);
+  return (__m64) __builtin_ia32_pandn ((long long)__m1, (long long)__m2);
 }
 
 /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
 static __inline __m64
 _mm_or_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_por (__m1, __m2);
+  return (__m64)__builtin_ia32_por ((long long)__m1, (long long)__m2);
 }
 
 /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
 static __inline __m64
 _mm_xor_si64 (__m64 __m1, __m64 __m2)
 {
-  return __builtin_ia32_pxor (__m1, __m2);
+  return (__m64)__builtin_ia32_pxor ((long long)__m1, (long long)__m2);
 }
 
 /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
@@ -444,7 +446,7 @@ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
 static __inline __m64
 _mm_setzero_si64 (void)
 {
-  return __builtin_ia32_mmx_zero ();
+  return (__m64)__builtin_ia32_mmx_zero ();
 }
 
 /* Creates a vector of two 32-bit values; I0 is least significant. */