spu.c (spu_builtin_range): Move from spu-c.c

2006-11-30  Andrew Pinski  <andrew_pinski@playstation.sony.com>

        * config/spu/spu.c (spu_builtin_range): Move from spu-c.c.
        (TARGET_RESOLVE_OVERLOADED_BUILTIN): Delete.
        (spu_cpu_cpp_builtins): Remove.
        (spu_override_options): Don't set warn_main.
        (spu_force_reg): Move from spu-c.c.
        (spu_check_builtin_parm): Likewise.
        (expand_builtin_args): Likewise.
        (spu_expand_builtin_1): Likewise.
        (spu_expand_builtin): Likewise.
        * config/spu/spu.h (REGISTER_TARGET_PRAGMAS): Define, set
        warn_main and targetm.resolve_overloaded_builtin.
        * config/spu/spu-c.c (spu_builtin_range): Remove.
        (spu_check_builtin_parm): Remove.
        (expand_builtin_args): Remove.
        (spu_expand_builtin_1): Remove.
        (spu_expand_builtin): Remove.
        (spu_cpu_cpp_builtins): Move from spu.c.
        (spu_force_reg): Remove.

From-SVN: r119397
Author:    Andrew Pinski <andrew_pinski@playstation.sony.com>
Committed: 2006-12-01 02:38:47 +00:00 by Andrew Pinski
Parent:    8521c41417
Commit:    b66b813d47
4 changed files with 319 additions and 302 deletions
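The structural effect of the change is easiest to see in how the overload-resolution hook gets installed. Both fragments below are taken from the hunks that follow; the remark about C-family front ends is an inference about the motivation, not something stated in the commit.

/* Before: spu.c installed the hook statically through the target macro,
   which made spu.c reference spu_resolve_overloaded_builtin, a routine
   living in the C-front-end-only file spu-c.c.  */
#undef TARGET_RESOLVE_OVERLOADED_BUILTIN
#define TARGET_RESOLVE_OVERLOADED_BUILTIN spu_resolve_overloaded_builtin

/* After: spu.h assigns the hook (and clears warn_main) at runtime from
   REGISTER_TARGET_PRAGMAS, which only the C-family front ends expand.  */
#define REGISTER_TARGET_PRAGMAS() do { \
  targetm.resolve_overloaded_builtin = spu_resolve_overloaded_builtin; \
  /* Don't give warnings about the main() function. */ \
  warn_main = 0; \
}while (0);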

gcc/ChangeLog

@@ -1,3 +1,24 @@
2006-11-30  Andrew Pinski  <andrew_pinski@playstation.sony.com>

        * config/spu/spu.c (spu_builtin_range): Move from spu-c.c.
        (TARGET_RESOLVE_OVERLOADED_BUILTIN): Delete.
        (spu_cpu_cpp_builtins): Remove.
        (spu_override_options): Don't set warn_main.
        (spu_force_reg): Move from spu-c.c.
        (spu_check_builtin_parm): Likewise.
        (expand_builtin_args): Likewise.
        (spu_expand_builtin_1): Likewise.
        (spu_expand_builtin): Likewise.
        * config/spu/spu.h (REGISTER_TARGET_PRAGMAS): Define, set
        warn_main and targetm.resolve_overloaded_builtin.
        * config/spu/spu-c.c (spu_builtin_range): Remove.
        (spu_check_builtin_parm): Remove.
        (expand_builtin_args): Remove.
        (spu_expand_builtin_1): Remove.
        (spu_expand_builtin): Remove.
        (spu_cpu_cpp_builtins): Move from spu.c.
        (spu_force_reg): Remove.

2006-12-01  Joseph Myers  <joseph@codesourcery.com>
            David Edelsohn  <edelsohn@gnu.org>

gcc/config/spu/spu-c.c

@@ -34,35 +34,6 @@
#include "recog.h"
#include "optabs.h"
#include "spu-builtins.h"
static rtx spu_expand_builtin_1 (struct spu_builtin_description *d,
tree arglist, rtx target);
static void spu_check_builtin_parm (struct spu_builtin_description *, rtx,
int);
static void expand_builtin_args (struct spu_builtin_description *, tree, rtx,
rtx[]);
static rtx spu_force_reg (enum machine_mode mode, rtx op);
/* Builtin types, data and prototypes. */
struct spu_builtin_range
{
int low, high;
};
static struct spu_builtin_range spu_builtin_range[] = {
{-0x40ll, 0x7fll}, /* SPU_BTI_7 */
{-0x40ll, 0x3fll}, /* SPU_BTI_S7 */
{0ll, 0x7fll}, /* SPU_BTI_U7 */
{-0x200ll, 0x1ffll}, /* SPU_BTI_S10 */
{-0x2000ll, 0x1fffll}, /* SPU_BTI_S10_4 */
{0ll, 0x3fffll}, /* SPU_BTI_U14 */
{-0x8000ll, 0xffffll}, /* SPU_BTI_16 */
{-0x8000ll, 0x7fffll}, /* SPU_BTI_S16 */
{-0x20000ll, 0x1ffffll}, /* SPU_BTI_S16_2 */
{0ll, 0xffffll}, /* SPU_BTI_U16 */
{0ll, 0x3ffffll}, /* SPU_BTI_U16_2 */
{0ll, 0x3ffffll}, /* SPU_BTI_U18 */
};
/* Helper for spu_resolve_overloaded_builtin. */
@@ -181,263 +152,13 @@ spu_resolve_overloaded_builtin (tree fndecl, tree fnargs)
return spu_build_overload_builtin (match, fnargs);
}
static void
spu_check_builtin_parm (struct spu_builtin_description *d, rtx op, int p)
void
spu_cpu_cpp_builtins (struct cpp_reader *pfile)
{
HOST_WIDE_INT v = 0;
int lsbits;
/* Check the range of immediate operands. */
if (p >= SPU_BTI_7 && p <= SPU_BTI_U18)
{
int range = p - SPU_BTI_7;
if (!CONSTANT_P (op)
|| (GET_CODE (op) == CONST_INT
&& (INTVAL (op) < spu_builtin_range[range].low
|| INTVAL (op) > spu_builtin_range[range].high)))
error ("%s expects an integer literal in the range [%d, %d].",
d->name,
spu_builtin_range[range].low, spu_builtin_range[range].high);
if (GET_CODE (op) == CONST
&& (GET_CODE (XEXP (op, 0)) == PLUS
|| GET_CODE (XEXP (op, 0)) == MINUS))
{
v = INTVAL (XEXP (XEXP (op, 0), 1));
op = XEXP (XEXP (op, 0), 0);
}
else if (GET_CODE (op) == CONST_INT)
v = INTVAL (op);
switch (p)
{
case SPU_BTI_S10_4:
lsbits = 4;
break;
case SPU_BTI_U16_2:
/* This is only used in lqa, and stqa. Even though the insns
encode 16 bits of the address (all but the 2 least
significant), only 14 bits are used because it is masked to
be 16 byte aligned. */
lsbits = 4;
break;
case SPU_BTI_S16_2:
/* This is used for lqr and stqr. */
lsbits = 2;
break;
default:
lsbits = 0;
}
if (GET_CODE (op) == LABEL_REF
|| (GET_CODE (op) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (op))
|| (INTVAL (op) & ((1 << lsbits) - 1)) != 0)
warning (0, "%d least significant bits of %s are ignored.", lsbits,
d->name);
}
builtin_define_std ("__SPU__");
cpp_assert (pfile, "cpu=spu");
cpp_assert (pfile, "machine=spu");
builtin_define_std ("__vector=__attribute__((__spu_vector__))");
}
static void
expand_builtin_args (struct spu_builtin_description *d, tree arglist,
rtx target, rtx ops[])
{
enum insn_code icode = d->icode;
int i = 0;
/* Expand the arguments into rtl. */
if (d->parm[0] != SPU_BTI_VOID)
ops[i++] = target;
for (; i < insn_data[icode].n_operands; i++)
{
tree arg = TREE_VALUE (arglist);
if (arg == 0)
abort ();
ops[i] = expand_expr (arg, NULL_RTX, VOIDmode, 0);
arglist = TREE_CHAIN (arglist);
}
}
static rtx
spu_expand_builtin_1 (struct spu_builtin_description *d,
tree arglist, rtx target)
{
rtx pat;
rtx ops[8];
enum insn_code icode = d->icode;
enum machine_mode mode, tmode;
int i, p;
tree return_type;
/* Set up ops[] with values from arglist. */
expand_builtin_args (d, arglist, target, ops);
/* Handle the target operand which must be operand 0. */
i = 0;
if (d->parm[0] != SPU_BTI_VOID)
{
/* We prefer the mode specified for the match_operand otherwise
use the mode from the builtin function prototype. */
tmode = insn_data[d->icode].operand[0].mode;
if (tmode == VOIDmode)
tmode = TYPE_MODE (spu_builtin_types[d->parm[0]]);
/* Try to use target because not using it can lead to extra copies
and when we are using all of the registers extra copies leads
to extra spills. */
if (target && GET_CODE (target) == REG && GET_MODE (target) == tmode)
ops[0] = target;
else
target = ops[0] = gen_reg_rtx (tmode);
if (!(*insn_data[icode].operand[0].predicate) (ops[0], tmode))
abort ();
i++;
}
/* Ignore align_hint, but still expand its args in case they have
side effects. */
if (icode == CODE_FOR_spu_align_hint)
return 0;
/* Handle the rest of the operands. */
for (p = 1; i < insn_data[icode].n_operands; i++, p++)
{
if (insn_data[d->icode].operand[i].mode != VOIDmode)
mode = insn_data[d->icode].operand[i].mode;
else
mode = TYPE_MODE (spu_builtin_types[d->parm[i]]);
/* mode can be VOIDmode here for labels */
/* For specific intrinsics with an immediate operand, e.g.,
si_ai(), we sometimes need to convert the scalar argument to a
vector argument by splatting the scalar. */
if (VECTOR_MODE_P (mode)
&& (GET_CODE (ops[i]) == CONST_INT
|| GET_MODE_CLASS (GET_MODE (ops[i])) == MODE_INT
|| GET_MODE_CLASS (GET_MODE (ops[i])) == MODE_FLOAT))
{
if (GET_CODE (ops[i]) == CONST_INT)
ops[i] = spu_const (mode, INTVAL (ops[i]));
else
{
rtx reg = gen_reg_rtx (mode);
enum machine_mode imode = GET_MODE_INNER (mode);
if (!spu_nonmem_operand (ops[i], GET_MODE (ops[i])))
ops[i] = force_reg (GET_MODE (ops[i]), ops[i]);
if (imode != GET_MODE (ops[i]))
ops[i] = convert_to_mode (imode, ops[i],
TYPE_UNSIGNED (spu_builtin_types
[d->parm[i]]));
emit_insn (gen_spu_splats (reg, ops[i]));
ops[i] = reg;
}
}
if (!(*insn_data[icode].operand[i].predicate) (ops[i], mode))
ops[i] = spu_force_reg (mode, ops[i]);
spu_check_builtin_parm (d, ops[i], d->parm[p]);
}
switch (insn_data[icode].n_operands)
{
case 0:
pat = GEN_FCN (icode) (0);
break;
case 1:
pat = GEN_FCN (icode) (ops[0]);
break;
case 2:
pat = GEN_FCN (icode) (ops[0], ops[1]);
break;
case 3:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2]);
break;
case 4:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]);
break;
case 5:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3], ops[4]);
break;
case 6:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3], ops[4], ops[5]);
break;
default:
abort ();
}
if (!pat)
abort ();
if (d->type == B_CALL || d->type == B_BISLED)
emit_call_insn (pat);
else if (d->type == B_JUMP)
{
emit_jump_insn (pat);
emit_barrier ();
}
else
emit_insn (pat);
return_type = spu_builtin_types[d->parm[0]];
if (d->parm[0] != SPU_BTI_VOID
&& GET_MODE (target) != TYPE_MODE (return_type))
{
/* target is the return value. It should always be the mode of
the builtin function prototype. */
target = spu_force_reg (TYPE_MODE (return_type), target);
}
return target;
}
rtx
spu_expand_builtin (tree exp,
rtx target,
rtx subtarget ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED,
int ignore ATTRIBUTE_UNUSED)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
unsigned int fcode = DECL_FUNCTION_CODE (fndecl) - END_BUILTINS;
tree arglist = TREE_OPERAND (exp, 1);
struct spu_builtin_description *d;
if (fcode < NUM_SPU_BUILTINS)
{
d = &spu_builtins[fcode];
return spu_expand_builtin_1 (d, arglist, target);
}
abort ();
}
static rtx
spu_force_reg (enum machine_mode mode, rtx op)
{
rtx x, r;
if (GET_MODE (op) == VOIDmode || GET_MODE (op) == BLKmode)
{
if ((SCALAR_INT_MODE_P (mode) && GET_CODE (op) == CONST_INT)
|| GET_MODE (op) == BLKmode)
return force_reg (mode, convert_to_mode (mode, op, 0));
abort ();
}
r = force_reg (GET_MODE (op), op);
if (GET_MODE_SIZE (GET_MODE (op)) == GET_MODE_SIZE (mode))
{
x = simplify_gen_subreg (mode, r, GET_MODE (op), 0);
if (x)
return x;
}
x = gen_reg_rtx (mode);
emit_insn (gen_spu_convert (x, r));
return x;
}
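For reference, the preprocessor setup done by spu_cpu_cpp_builtins, which now lives in spu-c.c as shown above, is what makes the usual SPU spellings available to user code. The snippet below is hypothetical user code, not part of the patch; it relies only on the __SPU__ and __vector macros defined above.

#ifdef __SPU__
/* __vector expands to __attribute__((__spu_vector__)), so this declares a
   16-byte SPU vector holding four 32-bit signed integers.  */
__vector signed int lanes = { 1, 2, 3, 4 };
#endif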

gcc/config/spu/spu.c

@@ -54,6 +54,28 @@
#include "tree-gimple.h"
#include "tm-constrs.h"
#include "spu-builtins.h"
/* Builtin types, data and prototypes. */
struct spu_builtin_range
{
int low, high;
};
static struct spu_builtin_range spu_builtin_range[] = {
{-0x40ll, 0x7fll}, /* SPU_BTI_7 */
{-0x40ll, 0x3fll}, /* SPU_BTI_S7 */
{0ll, 0x7fll}, /* SPU_BTI_U7 */
{-0x200ll, 0x1ffll}, /* SPU_BTI_S10 */
{-0x2000ll, 0x1fffll}, /* SPU_BTI_S10_4 */
{0ll, 0x3fffll}, /* SPU_BTI_U14 */
{-0x8000ll, 0xffffll}, /* SPU_BTI_16 */
{-0x8000ll, 0x7fffll}, /* SPU_BTI_S16 */
{-0x20000ll, 0x1ffffll}, /* SPU_BTI_S16_2 */
{0ll, 0xffffll}, /* SPU_BTI_U16 */
{0ll, 0x3ffffll}, /* SPU_BTI_U16_2 */
{0ll, 0x3ffffll}, /* SPU_BTI_U18 */
};
/* Target specific attribute specifications. */
char regs_ever_allocated[FIRST_PSEUDO_REGISTER];
@@ -133,9 +155,6 @@ tree spu_builtin_types[SPU_BTI_MAX];
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS spu_init_builtins
#undef TARGET_RESOLVE_OVERLOADED_BUILTIN
#define TARGET_RESOLVE_OVERLOADED_BUILTIN spu_resolve_overloaded_builtin
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN spu_expand_builtin
@@ -213,16 +232,6 @@ const struct attribute_spec spu_attribute_table[];
struct gcc_target targetm = TARGET_INITIALIZER;
void
spu_cpu_cpp_builtins (struct cpp_reader *pfile)
{
extern void builtin_define_std (const char *);
builtin_define_std ("__SPU__");
cpp_assert (pfile, "cpu=spu");
cpp_assert (pfile, "machine=spu");
builtin_define_std ("__vector=__attribute__((__spu_vector__))");
}
/* Sometimes certain combinations of command options do not make sense
on a particular target machine. You can define a macro
OVERRIDE_OPTIONS to take account of this. This macro, if defined, is
@@ -231,9 +240,6 @@ void
spu_override_options (void)
{
/* Don't give warnings about the main() function. */
warn_main = 0;
/* Override some of the default param values. With so many registers
larger values are better for these params. */
if (MAX_UNROLLED_INSNS == 100)
@@ -4467,3 +4473,266 @@ spu_expand_vector_init (rtx target, rtx vals)
}
}
}
static rtx
spu_force_reg (enum machine_mode mode, rtx op)
{
rtx x, r;
if (GET_MODE (op) == VOIDmode || GET_MODE (op) == BLKmode)
{
if ((SCALAR_INT_MODE_P (mode) && GET_CODE (op) == CONST_INT)
|| GET_MODE (op) == BLKmode)
return force_reg (mode, convert_to_mode (mode, op, 0));
abort ();
}
r = force_reg (GET_MODE (op), op);
if (GET_MODE_SIZE (GET_MODE (op)) == GET_MODE_SIZE (mode))
{
x = simplify_gen_subreg (mode, r, GET_MODE (op), 0);
if (x)
return x;
}
x = gen_reg_rtx (mode);
emit_insn (gen_spu_convert (x, r));
return x;
}
static void
spu_check_builtin_parm (struct spu_builtin_description *d, rtx op, int p)
{
HOST_WIDE_INT v = 0;
int lsbits;
/* Check the range of immediate operands. */
if (p >= SPU_BTI_7 && p <= SPU_BTI_U18)
{
int range = p - SPU_BTI_7;
if (!CONSTANT_P (op)
|| (GET_CODE (op) == CONST_INT
&& (INTVAL (op) < spu_builtin_range[range].low
|| INTVAL (op) > spu_builtin_range[range].high)))
error ("%s expects an integer literal in the range [%d, %d].",
d->name,
spu_builtin_range[range].low, spu_builtin_range[range].high);
if (GET_CODE (op) == CONST
&& (GET_CODE (XEXP (op, 0)) == PLUS
|| GET_CODE (XEXP (op, 0)) == MINUS))
{
v = INTVAL (XEXP (XEXP (op, 0), 1));
op = XEXP (XEXP (op, 0), 0);
}
else if (GET_CODE (op) == CONST_INT)
v = INTVAL (op);
switch (p)
{
case SPU_BTI_S10_4:
lsbits = 4;
break;
case SPU_BTI_U16_2:
/* This is only used in lqa, and stqa. Even though the insns
encode 16 bits of the address (all but the 2 least
significant), only 14 bits are used because it is masked to
be 16 byte aligned. */
lsbits = 4;
break;
case SPU_BTI_S16_2:
/* This is used for lqr and stqr. */
lsbits = 2;
break;
default:
lsbits = 0;
}
if (GET_CODE (op) == LABEL_REF
|| (GET_CODE (op) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (op))
|| (INTVAL (op) & ((1 << lsbits) - 1)) != 0)
warning (0, "%d least significant bits of %s are ignored.", lsbits,
d->name);
}
}
static void
expand_builtin_args (struct spu_builtin_description *d, tree arglist,
rtx target, rtx ops[])
{
enum insn_code icode = d->icode;
int i = 0;
/* Expand the arguments into rtl. */
if (d->parm[0] != SPU_BTI_VOID)
ops[i++] = target;
for (; i < insn_data[icode].n_operands; i++)
{
tree arg = TREE_VALUE (arglist);
if (arg == 0)
abort ();
ops[i] = expand_expr (arg, NULL_RTX, VOIDmode, 0);
arglist = TREE_CHAIN (arglist);
}
}
static rtx
spu_expand_builtin_1 (struct spu_builtin_description *d,
tree arglist, rtx target)
{
rtx pat;
rtx ops[8];
enum insn_code icode = d->icode;
enum machine_mode mode, tmode;
int i, p;
tree return_type;
/* Set up ops[] with values from arglist. */
expand_builtin_args (d, arglist, target, ops);
/* Handle the target operand which must be operand 0. */
i = 0;
if (d->parm[0] != SPU_BTI_VOID)
{
/* We prefer the mode specified for the match_operand otherwise
use the mode from the builtin function prototype. */
tmode = insn_data[d->icode].operand[0].mode;
if (tmode == VOIDmode)
tmode = TYPE_MODE (spu_builtin_types[d->parm[0]]);
/* Try to use target because not using it can lead to extra copies
and when we are using all of the registers extra copies leads
to extra spills. */
if (target && GET_CODE (target) == REG && GET_MODE (target) == tmode)
ops[0] = target;
else
target = ops[0] = gen_reg_rtx (tmode);
if (!(*insn_data[icode].operand[0].predicate) (ops[0], tmode))
abort ();
i++;
}
/* Ignore align_hint, but still expand its args in case they have
side effects. */
if (icode == CODE_FOR_spu_align_hint)
return 0;
/* Handle the rest of the operands. */
for (p = 1; i < insn_data[icode].n_operands; i++, p++)
{
if (insn_data[d->icode].operand[i].mode != VOIDmode)
mode = insn_data[d->icode].operand[i].mode;
else
mode = TYPE_MODE (spu_builtin_types[d->parm[i]]);
/* mode can be VOIDmode here for labels */
/* For specific intrinsics with an immediate operand, e.g.,
si_ai(), we sometimes need to convert the scalar argument to a
vector argument by splatting the scalar. */
if (VECTOR_MODE_P (mode)
&& (GET_CODE (ops[i]) == CONST_INT
|| GET_MODE_CLASS (GET_MODE (ops[i])) == MODE_INT
|| GET_MODE_CLASS (GET_MODE (ops[i])) == MODE_FLOAT))
{
if (GET_CODE (ops[i]) == CONST_INT)
ops[i] = spu_const (mode, INTVAL (ops[i]));
else
{
rtx reg = gen_reg_rtx (mode);
enum machine_mode imode = GET_MODE_INNER (mode);
if (!spu_nonmem_operand (ops[i], GET_MODE (ops[i])))
ops[i] = force_reg (GET_MODE (ops[i]), ops[i]);
if (imode != GET_MODE (ops[i]))
ops[i] = convert_to_mode (imode, ops[i],
TYPE_UNSIGNED (spu_builtin_types
[d->parm[i]]));
emit_insn (gen_spu_splats (reg, ops[i]));
ops[i] = reg;
}
}
if (!(*insn_data[icode].operand[i].predicate) (ops[i], mode))
ops[i] = spu_force_reg (mode, ops[i]);
spu_check_builtin_parm (d, ops[i], d->parm[p]);
}
switch (insn_data[icode].n_operands)
{
case 0:
pat = GEN_FCN (icode) (0);
break;
case 1:
pat = GEN_FCN (icode) (ops[0]);
break;
case 2:
pat = GEN_FCN (icode) (ops[0], ops[1]);
break;
case 3:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2]);
break;
case 4:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]);
break;
case 5:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3], ops[4]);
break;
case 6:
pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3], ops[4], ops[5]);
break;
default:
abort ();
}
if (!pat)
abort ();
if (d->type == B_CALL || d->type == B_BISLED)
emit_call_insn (pat);
else if (d->type == B_JUMP)
{
emit_jump_insn (pat);
emit_barrier ();
}
else
emit_insn (pat);
return_type = spu_builtin_types[d->parm[0]];
if (d->parm[0] != SPU_BTI_VOID
&& GET_MODE (target) != TYPE_MODE (return_type))
{
/* target is the return value. It should always be the mode of
the builtin function prototype. */
target = spu_force_reg (TYPE_MODE (return_type), target);
}
return target;
}
rtx
spu_expand_builtin (tree exp,
rtx target,
rtx subtarget ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED,
int ignore ATTRIBUTE_UNUSED)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
unsigned int fcode = DECL_FUNCTION_CODE (fndecl) - END_BUILTINS;
tree arglist = TREE_OPERAND (exp, 1);
struct spu_builtin_description *d;
if (fcode < NUM_SPU_BUILTINS)
{
d = &spu_builtins[fcode];
return spu_expand_builtin_1 (d, arglist, target);
}
abort ();
}
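As a concrete illustration of the range table and spu_check_builtin_parm added above: the comment in spu_expand_builtin_1 names si_ai() as a builtin with an immediate operand. Assuming that immediate is classified as SPU_BTI_S10 (range [-0x200, 0x1ff], i.e. [-512, 511]) and assuming the usual qword si_ai (qword, int) prototype from spu_internals.h, the check behaves roughly as sketched below; this is an editor's sketch, not code from the patch.

#include <spu_internals.h>   /* assumed to declare qword and the si_* intrinsics */

qword
add_immediate (qword a)
{
  qword ok = si_ai (a, 511);     /* in range: 511 <= 0x1ff, accepted silently */
  /* qword bad = si_ai (a, 512);    out of range: spu_check_builtin_parm reports
        "si_ai expects an integer literal in the range [-512, 511]."  */
  return ok;
}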

gcc/config/spu/spu.h

@@ -238,6 +238,12 @@ enum reg_class {
((GET_MODE_SIZE (FROM) > 4 || GET_MODE_SIZE (TO) > 4) \
&& GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO))
#define REGISTER_TARGET_PRAGMAS() do { \
targetm.resolve_overloaded_builtin = spu_resolve_overloaded_builtin; \
/* Don't give warnings about the main() function. */ \
warn_main = 0; \
}while (0);
/* Frame Layout */
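Finally, a note on how the new REGISTER_TARGET_PRAGMAS definition takes effect: the C-family front ends expand the macro while registering pragmas, roughly as sketched below (an assumption paraphrasing gcc/c-pragma.c's init_pragma of that era, not part of this patch). Because only the C-family front ends run this code, the hook assignment and the warn_main tweak can live here without dragging C-front-end references into spu.c.

/* Sketch of the expansion site (assumed, paraphrased from gcc/c-pragma.c).  */
void
init_pragma (void)
{
  /* ... generic #pragma handlers are registered here ... */
#ifdef REGISTER_TARGET_PRAGMAS
  REGISTER_TARGET_PRAGMAS ();
#endif
}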