emit-rtl.c (gen_lowpart_common): Use simplify_gen_subreg for constants.

* emit-rtl.c (gen_lowpart_common): Use simplify_gen_subreg
	for constants.
	(constant_subword): Delete.
	* rtl.h (constant_subword): Delete prototype.
	(immed_double_const): Is not in varasm.c.
	* simplify-rtx.c (simplify_immed_subreg): New.
	(simplify_subreg): Use simplify_immed_subreg.

From-SVN: r75487
Geoffrey Keating <geoffk@apple.com>, 2004-01-06 22:51:00 +00:00 (committed by Geoffrey Keating)
commit 550d138775, parent fed2b31685
4 changed files with 311 additions and 522 deletions
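
The emit-rtl.c hunk below has gen_lowpart_common stop open-coding the extraction of constant low parts: it now invents an integer mode to stand in for a VOIDmode CONST_INT or CONST_DOUBLE and delegates to simplify_gen_subreg. A minimal sketch of the resulting call pattern, assuming a hypothetical helper name and the usual GCC-internal include boilerplate (neither is part of this commit):

/* Sketch only: obtain the low SImode part of X, which may be a REG,
   SUBREG, CONCAT, CONST_VECTOR, CONST_DOUBLE or CONST_INT.  After this
   commit all of these cases funnel into simplify_gen_subreg.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"

static rtx
low_sipart (rtx x)	/* hypothetical helper, not in the tree */
{
  /* gen_lowpart_common returns 0 when no low part can be formed.  */
  return gen_lowpart_common (SImode, x);
}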

ChangeLog

@@ -5,6 +5,14 @@
2004-01-06 Geoffrey Keating <geoffk@apple.com>
* emit-rtl.c (gen_lowpart_common): Use simplify_gen_subreg
for constants.
(constant_subword): Delete.
* rtl.h (constant_subword): Delete prototype.
(immed_double_const): Is not in varasm.c.
* simplify-rtx.c (simplify_immed_subreg): New.
(simplify_subreg): Use simplify_immed_subreg.
* config/rs6000/rs6000.md (floatsitf2): Use expand_float rather
than trying to generate RTL directly.
(fix_trunctfsi2): Use expand_fix rather than trying to generate

emit-rtl.c

@@ -1050,24 +1050,36 @@ rtx
gen_lowpart_common (enum machine_mode mode, rtx x)
{
int msize = GET_MODE_SIZE (mode);
int xsize = GET_MODE_SIZE (GET_MODE (x));
int xsize;
int offset = 0;
enum machine_mode innermode;
if (GET_MODE (x) == mode)
/* Unfortunately, this routine doesn't take a parameter for the mode of X,
so we have to make one up. Yuk. */
innermode = GET_MODE (x);
if (GET_CODE (x) == CONST_INT && msize <= HOST_BITS_PER_WIDE_INT)
innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
else if (innermode == VOIDmode)
innermode = mode_for_size (HOST_BITS_PER_WIDE_INT * 2, MODE_INT, 0);
xsize = GET_MODE_SIZE (innermode);
if (innermode == VOIDmode || innermode == BLKmode)
abort ();
if (innermode == mode)
return x;
/* MODE must occupy no more words than the mode of X. */
if (GET_MODE (x) != VOIDmode
&& ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
> ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
if ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
> ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
return 0;
/* Don't allow generating paradoxical FLOAT_MODE subregs. */
if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE (x) != VOIDmode && msize > xsize)
if (GET_MODE_CLASS (mode) == MODE_FLOAT && msize > xsize)
return 0;
offset = subreg_lowpart_offset (mode, GET_MODE (x));
offset = subreg_lowpart_offset (mode, innermode);
if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
&& (GET_MODE_CLASS (mode) == MODE_INT
@@ -1083,154 +1095,15 @@ gen_lowpart_common (enum machine_mode mode, rtx x)
if (GET_MODE (XEXP (x, 0)) == mode)
return XEXP (x, 0);
else if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
else if (msize < GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
return gen_lowpart_common (mode, XEXP (x, 0));
else if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x)))
else if (msize < xsize)
return gen_rtx_fmt_e (GET_CODE (x), mode, XEXP (x, 0));
}
else if (GET_CODE (x) == SUBREG || GET_CODE (x) == REG
|| GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR)
return simplify_gen_subreg (mode, x, GET_MODE (x), offset);
else if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
return simplify_gen_subreg (mode, x, int_mode_for_mode (mode), offset);
/* If X is a CONST_INT or a CONST_DOUBLE, extract the appropriate bits
from the low-order part of the constant. */
else if ((GET_MODE_CLASS (mode) == MODE_INT
|| GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
&& GET_MODE (x) == VOIDmode
&& (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
{
/* If MODE is twice the host word size, X is already the desired
representation. Otherwise, if MODE is wider than a word, we can't
do this. If MODE is exactly a word, return just one CONST_INT. */
if (GET_MODE_BITSIZE (mode) >= 2 * HOST_BITS_PER_WIDE_INT)
return x;
else if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
return 0;
else if (GET_MODE_BITSIZE (mode) == HOST_BITS_PER_WIDE_INT)
return (GET_CODE (x) == CONST_INT ? x
: GEN_INT (CONST_DOUBLE_LOW (x)));
else
{
/* MODE must be narrower than HOST_BITS_PER_WIDE_INT. */
HOST_WIDE_INT val = (GET_CODE (x) == CONST_INT ? INTVAL (x)
: CONST_DOUBLE_LOW (x));
/* Sign extend to HOST_WIDE_INT. */
val = trunc_int_for_mode (val, mode);
return (GET_CODE (x) == CONST_INT && INTVAL (x) == val ? x
: GEN_INT (val));
}
}
/* The floating-point emulator can handle all conversions between
FP and integer operands. This simplifies reload because it
doesn't have to deal with constructs like (subreg:DI
(const_double:SF ...)) or (subreg:DF (const_int ...)). */
/* Single-precision floats are always 32-bits and double-precision
floats are always 64-bits. */
else if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == 32
&& GET_CODE (x) == CONST_INT)
{
REAL_VALUE_TYPE r;
long i = INTVAL (x);
real_from_target (&r, &i, mode);
return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
}
else if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == 64
&& (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
&& GET_MODE (x) == VOIDmode)
{
REAL_VALUE_TYPE r;
HOST_WIDE_INT low, high;
long i[2];
if (GET_CODE (x) == CONST_INT)
{
low = INTVAL (x);
high = low >> (HOST_BITS_PER_WIDE_INT - 1);
}
else
{
low = CONST_DOUBLE_LOW (x);
high = CONST_DOUBLE_HIGH (x);
}
if (HOST_BITS_PER_WIDE_INT > 32)
high = low >> 31 >> 1;
/* REAL_VALUE_TARGET_DOUBLE takes the addressing order of the
target machine. */
if (WORDS_BIG_ENDIAN)
i[0] = high, i[1] = low;
else
i[0] = low, i[1] = high;
real_from_target (&r, i, mode);
return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
}
else if ((GET_MODE_CLASS (mode) == MODE_INT
|| GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
&& GET_CODE (x) == CONST_DOUBLE
&& GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
{
REAL_VALUE_TYPE r;
long i[4]; /* Only the low 32 bits of each 'long' are used. */
int endian = WORDS_BIG_ENDIAN ? 1 : 0;
/* Convert 'r' into an array of four 32-bit words in target word
order. */
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
switch (GET_MODE_BITSIZE (GET_MODE (x)))
{
case 32:
REAL_VALUE_TO_TARGET_SINGLE (r, i[3 * endian]);
i[1] = 0;
i[2] = 0;
i[3 - 3 * endian] = 0;
break;
case 64:
REAL_VALUE_TO_TARGET_DOUBLE (r, i + 2 * endian);
i[2 - 2 * endian] = 0;
i[3 - 2 * endian] = 0;
break;
case 96:
REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, i + endian);
i[3 - 3 * endian] = 0;
break;
case 128:
REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, i);
break;
default:
abort ();
}
/* Now, pack the 32-bit elements of the array into a CONST_DOUBLE
and return it. */
#if HOST_BITS_PER_WIDE_INT == 32
return immed_double_const (i[3 * endian], i[1 + endian], mode);
#else
if (HOST_BITS_PER_WIDE_INT != 64)
abort ();
return immed_double_const ((((unsigned long) i[3 * endian])
| ((HOST_WIDE_INT) i[1 + endian] << 32)),
(((unsigned long) i[2 - endian])
| ((HOST_WIDE_INT) i[3 - 3 * endian] << 32)),
mode);
#endif
}
/* If MODE is a condition code and X is a CONST_INT, the value of X
must already have been "recognized" by the back-end, and we can
assume that it is valid for this mode. */
else if (GET_MODE_CLASS (mode) == MODE_CC
&& GET_CODE (x) == CONST_INT)
return x;
|| GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
|| GET_CODE (x) == CONST_DOUBLE || GET_CODE (x) == CONST_INT)
return simplify_gen_subreg (mode, x, innermode, offset);
/* Otherwise, we can't do this. */
return 0;
@@ -1481,162 +1354,6 @@ subreg_lowpart_p (rtx x)
== SUBREG_BYTE (x));
}
/* Helper routine for all the constant cases of operand_subword.
Some places invoke this directly. */
rtx
constant_subword (rtx op, int offset, enum machine_mode mode)
{
int size_ratio = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
HOST_WIDE_INT val;
/* If OP is already an integer word, return it. */
if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) == UNITS_PER_WORD)
return op;
/* The output is some bits, the width of the target machine's word.
A wider-word host can surely hold them in a CONST_INT. A narrower-word
host can't. */
if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
&& GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == 64
&& GET_CODE (op) == CONST_DOUBLE)
{
long k[2];
REAL_VALUE_TYPE rv;
REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
/* We handle 32-bit and >= 64-bit words here. Note that the order in
which the words are written depends on the word endianness.
??? This is a potential portability problem and should
be fixed at some point.
We must exercise caution with the sign bit. By definition there
are 32 significant bits in K; there may be more in a HOST_WIDE_INT.
Consider a host with a 32-bit long and a 64-bit HOST_WIDE_INT.
So we explicitly mask and sign-extend as necessary. */
if (BITS_PER_WORD == 32)
{
val = k[offset];
val = ((val & 0xffffffff) ^ 0x80000000) - 0x80000000;
return GEN_INT (val);
}
#if HOST_BITS_PER_WIDE_INT >= 64
else if (BITS_PER_WORD >= 64 && offset == 0)
{
val = k[! WORDS_BIG_ENDIAN];
val = (((val & 0xffffffff) ^ 0x80000000) - 0x80000000) << 32;
val |= (HOST_WIDE_INT) k[WORDS_BIG_ENDIAN] & 0xffffffff;
return GEN_INT (val);
}
#endif
else if (BITS_PER_WORD == 16)
{
val = k[offset >> 1];
if ((offset & 1) == ! WORDS_BIG_ENDIAN)
val >>= 16;
val = ((val & 0xffff) ^ 0x8000) - 0x8000;
return GEN_INT (val);
}
else
abort ();
}
else if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
&& GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) > 64
&& GET_CODE (op) == CONST_DOUBLE)
{
long k[4];
REAL_VALUE_TYPE rv;
REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
if (BITS_PER_WORD == 32)
{
val = k[offset];
val = ((val & 0xffffffff) ^ 0x80000000) - 0x80000000;
return GEN_INT (val);
}
#if HOST_BITS_PER_WIDE_INT >= 64
else if (BITS_PER_WORD >= 64 && offset <= 1)
{
val = k[offset * 2 + ! WORDS_BIG_ENDIAN];
val = (((val & 0xffffffff) ^ 0x80000000) - 0x80000000) << 32;
val |= (HOST_WIDE_INT) k[offset * 2 + WORDS_BIG_ENDIAN] & 0xffffffff;
return GEN_INT (val);
}
#endif
else
abort ();
}
/* Single word float is a little harder, since single- and double-word
values often do not have the same high-order bits. We have already
verified that we want the only defined word of the single-word value. */
if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& GET_MODE_BITSIZE (mode) == 32
&& GET_CODE (op) == CONST_DOUBLE)
{
long l;
REAL_VALUE_TYPE rv;
REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
REAL_VALUE_TO_TARGET_SINGLE (rv, l);
/* Sign extend from known 32-bit value to HOST_WIDE_INT. */
val = l;
val = ((val & 0xffffffff) ^ 0x80000000) - 0x80000000;
if (BITS_PER_WORD == 16)
{
if ((offset & 1) == ! WORDS_BIG_ENDIAN)
val >>= 16;
val = ((val & 0xffff) ^ 0x8000) - 0x8000;
}
return GEN_INT (val);
}
/* The only remaining cases that we can handle are integers.
Convert to proper endianness now since these cases need it.
At this point, offset == 0 means the low-order word.
We do not want to handle the case when BITS_PER_WORD <= HOST_BITS_PER_INT
in general. However, if OP is (const_int 0), we can just return
it for any word. */
if (op == const0_rtx)
return op;
if (GET_MODE_CLASS (mode) != MODE_INT
|| (GET_CODE (op) != CONST_INT && GET_CODE (op) != CONST_DOUBLE)
|| BITS_PER_WORD > HOST_BITS_PER_WIDE_INT)
return 0;
if (WORDS_BIG_ENDIAN)
offset = GET_MODE_SIZE (mode) / UNITS_PER_WORD - 1 - offset;
/* Find out which word on the host machine this value is in and get
it from the constant. */
val = (offset / size_ratio == 0
? (GET_CODE (op) == CONST_INT ? INTVAL (op) : CONST_DOUBLE_LOW (op))
: (GET_CODE (op) == CONST_INT
? (INTVAL (op) < 0 ? ~0 : 0) : CONST_DOUBLE_HIGH (op)));
/* Get the value we want into the low bits of val. */
if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT)
val = ((val >> ((offset % size_ratio) * BITS_PER_WORD)));
val = trunc_int_for_mode (val, word_mode);
return GEN_INT (val);
}
/* Return subword OFFSET of operand OP.
The word number, OFFSET, is interpreted as the word number starting
at the low-order address. OFFSET 0 is the low-order word if not

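With constant_subword deleted above, the constant cases it handled are reached through the generic subreg machinery instead, ultimately landing in the new simplify_immed_subreg in simplify-rtx.c. A hedged sketch of the replacement call; the wrapper name is made up for illustration and is not part of the commit:

/* Sketch only: what a caller of the deleted constant_subword can use
   instead.  OP is a constant (e.g. CONST_INT or CONST_DOUBLE) of mode
   INNERMODE; WORDNUM counts words from the low-order address, as in
   operand_subword.  */
static rtx
subword_of_constant (rtx op, unsigned int wordnum,
		     enum machine_mode innermode)
{
  return simplify_gen_subreg (word_mode, op, innermode,
			      wordnum * UNITS_PER_WORD);
}
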
rtl.h

@@ -1485,7 +1485,6 @@ extern rtx gen_highpart_mode (enum machine_mode, enum machine_mode, rtx);
extern rtx gen_realpart (enum machine_mode, rtx);
extern rtx gen_imagpart (enum machine_mode, rtx);
extern rtx operand_subword (rtx, unsigned int, int, enum machine_mode);
extern rtx constant_subword (rtx, int, enum machine_mode);
/* In emit-rtl.c */
extern rtx operand_subword_force (rtx, unsigned int, enum machine_mode);
@@ -1507,10 +1506,10 @@ extern void push_to_sequence (rtx);
extern void end_sequence (void);
extern void push_to_full_sequence (rtx, rtx);
extern void end_full_sequence (rtx*, rtx*);
/* In varasm.c */
extern rtx immed_double_const (HOST_WIDE_INT, HOST_WIDE_INT,
enum machine_mode);
/* In varasm.c */
extern rtx force_const_mem (enum machine_mode, rtx);
/* In varasm.c */

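The new simplify_immed_subreg in simplify-rtx.c below works by flattening the constant into a little-endian array of 8-bit values, choosing the starting byte selected by the SUBREG, and repacking into the outer mode. The standalone program below is only a rough model of that unpack/select/repack idea for plain integers on a little-endian target; it uses none of the GCC types and omits the word/byte endianness corrections the real function performs:

#include <stdio.h>
#include <stdint.h>

/* Model of simplify_immed_subreg's core idea: represent a constant as
   little-endian bytes, pick the OUTER_BYTES bytes that a
   (subreg:OUTER ... BYTE) selects, and reassemble them.  */
static uint64_t
subreg_of_constant (uint64_t value, unsigned inner_bytes,
		    unsigned outer_bytes, unsigned byte)
{
  unsigned char buf[8];
  uint64_t result = 0;
  unsigned i;

  /* Unpack: byte I holds bits [8*I, 8*I+7] of the constant.  */
  for (i = 0; i < inner_bytes; i++)
    buf[i] = (value >> (8 * i)) & 0xff;

  /* Repack the OUTER_BYTES bytes starting at offset BYTE.  */
  for (i = 0; i < outer_bytes; i++)
    result |= (uint64_t) buf[byte + i] << (8 * i);

  return result;
}

int
main (void)
{
  /* On a little-endian 64-bit target, (subreg:SI (const_int ...) 4)
     selects the high 32-bit half of a DImode constant: prints 0x11223344.  */
  printf ("0x%llx\n",
	  (unsigned long long) subreg_of_constant (0x1122334455667788ULL,
						   8, 4, 4));
  return 0;
}
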
simplify-rtx.c

@@ -53,6 +53,8 @@ static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
unsigned int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
rtx, rtx);
@@ -2949,8 +2951,276 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
Works by unpacking OP into a collection of 8-bit values
represented as a little-endian array of 'unsigned char', selecting by BYTE,
and then repacking them again for OUTERMODE. */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
/* We support up to 512-bit values (for V8DFmode). */
enum {
max_bitsize = 512,
value_bit = 8,
value_mask = (1 << value_bit) - 1
};
unsigned char value[max_bitsize / value_bit];
int value_start;
int i;
int elem;
int num_elem;
rtx * elems;
int elem_bitsize;
rtx result_s;
rtvec result_v = NULL;
enum mode_class outer_class;
enum machine_mode outer_submode;
/* Some ports misuse CCmode. */
if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
return op;
/* Unpack the value. */
if (GET_CODE (op) == CONST_VECTOR)
{
num_elem = CONST_VECTOR_NUNITS (op);
elems = &CONST_VECTOR_ELT (op, 0);
elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
}
else
{
num_elem = 1;
elems = &op;
elem_bitsize = max_bitsize;
}
if (BITS_PER_UNIT % value_bit != 0)
abort (); /* Too complicated; reducing value_bit may help. */
if (elem_bitsize % BITS_PER_UNIT != 0)
abort (); /* I don't know how to handle endianness of sub-units. */
for (elem = 0; elem < num_elem; elem++)
{
unsigned char * vp;
rtx el = elems[elem];
/* Vectors are kept in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
unsigned bytele = (subword_byte % UNITS_PER_WORD
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
vp = value + (bytele * BITS_PER_UNIT) / value_bit;
}
switch (GET_CODE (el))
{
case CONST_INT:
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
*vp++ = INTVAL (el) >> i;
/* CONST_INTs are always logically sign-extended. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = INTVAL (el) < 0 ? -1 : 0;
break;
case CONST_DOUBLE:
if (GET_MODE (el) == VOIDmode)
{
/* If this triggers, someone should have generated a
CONST_INT instead. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
abort ();
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_DOUBLE_LOW (el) >> i;
while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
{
*vp++ = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
i += value_bit;
}
/* It shouldn't matter what's done here, so fill it with
zero. */
for (; i < max_bitsize; i += value_bit)
*vp++ = 0;
}
else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
{
long tmp[max_bitsize / 32];
int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
if (bitsize > elem_bitsize)
abort ();
if (bitsize % value_bit != 0)
abort ();
real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
GET_MODE (el));
/* real_to_target produces its result in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
and use WORDS_BIG_ENDIAN instead; see the documentation
of SUBREG in rtl.texi. */
for (i = 0; i < bitsize; i += value_bit)
{
int ibase;
if (WORDS_BIG_ENDIAN)
ibase = bitsize - 1 - i;
else
ibase = i;
*vp++ = tmp[ibase / 32] >> i % 32;
}
/* It shouldn't matter what's done here, so fill it with
zero. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
else
abort ();
break;
default:
abort ();
}
}
/* Now, pick the right byte to start with. */
/* Renumber BYTE so that the least-significant byte is byte 0. A special
case is paradoxical SUBREGs, which shouldn't be adjusted since they
will already have offset 0. */
if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
{
unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
- byte);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
byte = (subword_byte % UNITS_PER_WORD
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
}
/* BYTE should still be inside OP. (Note that BYTE is unsigned,
so if it's become negative it will instead be very large.) */
if (byte >= GET_MODE_SIZE (innermode))
abort ();
/* Convert from bytes to chunks of size value_bit. */
value_start = byte * (BITS_PER_UNIT / value_bit);
/* Re-pack the value. */
if (VECTOR_MODE_P (outermode))
{
num_elem = GET_MODE_NUNITS (outermode);
result_v = rtvec_alloc (num_elem);
elems = &RTVEC_ELT (result_v, 0);
outer_submode = GET_MODE_INNER (outermode);
}
else
{
num_elem = 1;
elems = &result_s;
outer_submode = outermode;
}
outer_class = GET_MODE_CLASS (outer_submode);
elem_bitsize = GET_MODE_BITSIZE (outer_submode);
if (elem_bitsize % value_bit != 0)
abort ();
if (elem_bitsize + value_start * value_bit > max_bitsize)
abort ();
for (elem = 0; elem < num_elem; elem++)
{
unsigned char *vp;
/* Vectors are stored in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
unsigned bytele = (subword_byte % UNITS_PER_WORD
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
}
switch (outer_class)
{
case MODE_INT:
case MODE_PARTIAL_INT:
{
unsigned HOST_WIDE_INT hi = 0, lo = 0;
for (i = 0;
i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
for (; i < elem_bitsize; i += value_bit)
hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
<< (i - HOST_BITS_PER_WIDE_INT));
/* immed_double_const doesn't call trunc_int_for_mode. I don't
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
elems[elem] = gen_int_mode (lo, outer_submode);
else
elems[elem] = immed_double_const (lo, hi, outer_submode);
}
break;
case MODE_FLOAT:
{
REAL_VALUE_TYPE r;
long tmp[max_bitsize / 32];
/* real_from_target wants its input in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
and use WORDS_BIG_ENDIAN instead; see the documentation
of SUBREG in rtl.texi. */
for (i = 0; i < max_bitsize / 32; i++)
tmp[i] = 0;
for (i = 0; i < elem_bitsize; i += value_bit)
{
int ibase;
if (WORDS_BIG_ENDIAN)
ibase = elem_bitsize - 1 - i;
else
ibase = i;
tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
}
real_from_target (&r, tmp, outer_submode);
elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
}
break;
default:
abort ();
}
}
if (VECTOR_MODE_P (outermode))
return gen_rtx_CONST_VECTOR (outermode, result_v);
else
return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
Return 0 if no simplifications is possible. */
Return 0 if no simplifications are possible. */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
@@ -2971,216 +3241,10 @@ simplify_subreg (enum machine_mode outermode, rtx op,
if (outermode == innermode && !byte)
return op;
/* Simplify subregs of vector constants. */
if (GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
const unsigned int offset = byte / elt_size;
rtx elt;
if (GET_MODE_INNER (innermode) == outermode)
{
elt = CONST_VECTOR_ELT (op, offset);
/* ?? We probably don't need this copy_rtx because constants
can be shared. ?? */
return copy_rtx (elt);
}
else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
&& GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
{
return (gen_rtx_CONST_VECTOR
(outermode,
gen_rtvec_v (GET_MODE_NUNITS (outermode),
&CONST_VECTOR_ELT (op, offset))));
}
else if (GET_MODE_CLASS (outermode) == MODE_INT
&& (GET_MODE_SIZE (outermode) % elt_size == 0))
{
/* This happens when the target register size is smaller then
the vector mode, and we synthesize operations with vectors
of elements that are smaller than the register size. */
HOST_WIDE_INT sum = 0, high = 0;
unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
int shift = BITS_PER_UNIT * elt_size;
unsigned HOST_WIDE_INT unit_mask;
unit_mask = (unsigned HOST_WIDE_INT) -1
>> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
for (; n_elts--; i += step)
{
elt = CONST_VECTOR_ELT (op, i);
if (GET_CODE (elt) == CONST_DOUBLE
&& GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
{
elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
elt);
if (! elt)
return NULL_RTX;
}
if (GET_CODE (elt) != CONST_INT)
return NULL_RTX;
/* Avoid overflow. */
if (high >> (HOST_BITS_PER_WIDE_INT - shift))
return NULL_RTX;
high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
sum = (sum << shift) + (INTVAL (elt) & unit_mask);
}
if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
return GEN_INT (trunc_int_for_mode (sum, outermode));
else if (GET_MODE_BITSIZE (outermode) == 2* HOST_BITS_PER_WIDE_INT)
return immed_double_const (sum, high, outermode);
else
return NULL_RTX;
}
else if (GET_MODE_CLASS (outermode) == MODE_INT
&& (elt_size % GET_MODE_SIZE (outermode) == 0))
{
enum machine_mode new_mode
= int_mode_for_mode (GET_MODE_INNER (innermode));
int subbyte = byte % elt_size;
op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
if (! op)
return NULL_RTX;
return simplify_subreg (outermode, op, new_mode, subbyte);
}
else if (GET_MODE_CLASS (outermode) == MODE_INT)
/* This shouldn't happen, but let's not do anything stupid. */
return NULL_RTX;
}
/* Attempt to simplify constant to non-SUBREG expression. */
if (CONSTANT_P (op))
{
int offset, part;
unsigned HOST_WIDE_INT val = 0;
if (VECTOR_MODE_P (outermode))
{
/* Construct a CONST_VECTOR from individual subregs. */
enum machine_mode submode = GET_MODE_INNER (outermode);
int subsize = GET_MODE_UNIT_SIZE (outermode);
int i, elts = GET_MODE_NUNITS (outermode);
rtvec v = rtvec_alloc (elts);
rtx elt;
for (i = 0; i < elts; i++, byte += subsize)
{
/* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
/* ??? It would be nice if we could actually make such subregs
on targets that allow such relocations. */
if (byte >= GET_MODE_SIZE (innermode))
elt = CONST0_RTX (submode);
else
elt = simplify_subreg (submode, op, innermode, byte);
if (! elt)
return NULL_RTX;
RTVEC_ELT (v, i) = elt;
}
return gen_rtx_CONST_VECTOR (outermode, v);
}
/* ??? This code is partly redundant with code below, but can handle
the subregs of floats and similar corner cases.
Later it we should move all simplification code here and rewrite
GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
using SIMPLIFY_SUBREG. */
if (subreg_lowpart_offset (outermode, innermode) == byte
&& GET_CODE (op) != CONST_VECTOR)
{
rtx new = gen_lowpart_if_possible (outermode, op);
if (new)
return new;
}
/* Similar comment as above apply here. */
if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
&& GET_MODE_SIZE (innermode) > UNITS_PER_WORD
&& GET_MODE_CLASS (outermode) == MODE_INT)
{
rtx new = constant_subword (op,
(byte / UNITS_PER_WORD),
innermode);
if (new)
return new;
}
if (GET_MODE_CLASS (outermode) != MODE_INT
&& GET_MODE_CLASS (outermode) != MODE_CC)
{
enum machine_mode new_mode = int_mode_for_mode (outermode);
if (new_mode != innermode || byte != 0)
{
op = simplify_subreg (new_mode, op, innermode, byte);
if (! op)
return NULL_RTX;
return simplify_subreg (outermode, op, new_mode, 0);
}
}
offset = byte * BITS_PER_UNIT;
switch (GET_CODE (op))
{
case CONST_DOUBLE:
if (GET_MODE (op) != VOIDmode)
break;
/* We can't handle this case yet. */
if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
return NULL_RTX;
part = offset >= HOST_BITS_PER_WIDE_INT;
if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
&& BYTES_BIG_ENDIAN)
|| (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
&& WORDS_BIG_ENDIAN))
part = !part;
val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
offset %= HOST_BITS_PER_WIDE_INT;
/* We've already picked the word we want from a double, so
pretend this is actually an integer. */
innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
/* Fall through. */
case CONST_INT:
if (GET_CODE (op) == CONST_INT)
val = INTVAL (op);
/* We don't handle synthesizing of non-integral constants yet. */
if (GET_MODE_CLASS (outermode) != MODE_INT)
return NULL_RTX;
if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
{
if (WORDS_BIG_ENDIAN)
offset = (GET_MODE_BITSIZE (innermode)
- GET_MODE_BITSIZE (outermode) - offset);
if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
&& GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
- 2 * (offset % BITS_PER_WORD));
}
if (offset >= HOST_BITS_PER_WIDE_INT)
return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
else
{
val >>= offset;
if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
val = trunc_int_for_mode (val, outermode);
return GEN_INT (val);
}
default:
break;
}
}
if (GET_CODE (op) == CONST_INT
|| GET_CODE (op) == CONST_DOUBLE
|| GET_CODE (op) == CONST_VECTOR)
return simplify_immed_subreg (outermode, op, innermode, byte);
/* Changing mode twice with SUBREG => just change it once,
or not at all if changing back op starting mode. */
@@ -3335,6 +3399,7 @@ simplify_subreg (enum machine_mode outermode, rtx op,
return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds. */
rtx