Fix some typos and coding style violations.

Backport from mainline
	2012-08-28  Walter Lee  <walt@tilera.com>
	
	* config/tilegx/tilegx.md: Fix code style.
	(*zero_extendsidi_truncdisi): Fix typo.
	* config/tilegx/tilegx.c: Fix code style.
	(tilegx_function_profiler): Fix typo.

From-SVN: r190740
Walter Lee <walt@tilera.com>, 2012-08-28 06:11:09 +00:00
commit 43d4e561b3 (parent 59798f281c)
3 changed files with 124 additions and 123 deletions

gcc/ChangeLog

@@ -1,3 +1,12 @@
+2012-08-28  Walter Lee  <walt@tilera.com>
+
+	Backport from mainline
+	2012-08-28  Walter Lee  <walt@tilera.com>
+
+	* config/tilegx/tilegx.md: Fix code style.
+	(*zero_extendsidi_truncdisi): Fix typo.
+	* config/tilegx/tilegx.c: Fix code style.
+	(tilegx_function_profiler): Fix typo.
+
 2012-08-27  Walter Lee  <walt@tilera.com>
 	Backport from mainline

gcc/config/tilegx/tilegx.c

@@ -144,7 +144,7 @@ tilegx_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
 }
-/* TARGET_MODE_REP_EXTENDED. */
+/* Implement TARGET_MODE_REP_EXTENDED. */
 static int
 tilegx_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
 {
@@ -405,7 +405,7 @@ tilegx_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
 addr = create_tmp_var (ptr_type_node, "va_arg");
-/* if an object is dynamically sized, a pointer to it is passed
+/* If an object is dynamically sized, a pointer to it is passed
 instead of the object itself. */
 pass_by_reference_p = pass_by_reference (NULL, TYPE_MODE (type), type,
 false);
@@ -457,11 +457,11 @@ tilegx_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
 {
 case CONST_INT:
 /* If this is an 8-bit constant, return zero since it can be
-used nearly anywhere with no cost. If it is a valid operand
-for an ADD or AND, likewise return 0 if we know it will be
-used in that context. Otherwise, return 2 since it might be
-used there later. All other constants take at least two
-insns. */
+used nearly anywhere with no cost. If it is a valid operand
+for an ADD or AND, likewise return 0 if we know it will be
+used in that context. Otherwise, return 2 since it might be
+used there later. All other constants take at least two
+insns. */
 if (satisfies_constraint_I (x))
 {
 *total = 0;
@@ -506,8 +506,8 @@ tilegx_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
 case MEM:
 /* If outer-code was a sign or zero extension, a cost of
-COSTS_N_INSNS (1) was already added in, so account for
-that. */
+COSTS_N_INSNS (1) was already added in, so account for
+that. */
 if (outer_code == ZERO_EXTEND || outer_code == SIGN_EXTEND)
 *total = COSTS_N_INSNS (1);
 else
@@ -635,7 +635,7 @@ tilegx_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
 static rtx
 create_temp_reg_if_possible (enum machine_mode mode, rtx default_reg)
 {
-return can_create_pseudo_p ()? gen_reg_rtx (mode) : default_reg;
+return can_create_pseudo_p () ? gen_reg_rtx (mode) : default_reg;
 }
@@ -1335,8 +1335,8 @@ tilegx_simd_int (rtx num, enum machine_mode mode)
 /* Returns true iff VAL can be moved into a register in one
-instruction. And if it can, it emits the code to move the
-constant into DEST_REG.
+instruction. And if it can, it emits the code to move the constant
+into DEST_REG.
 If THREE_WIDE_ONLY is true, this insists on an instruction that
 works in a bundle containing three instructions. */
@@ -1396,7 +1396,7 @@ tilegx_bitfield_operand_p (HOST_WIDE_INT n, int *first_bit, int *last_bit)
 continue;
 /* See if x is a power of two minus one, i.e. only consecutive 1
-bits starting from bit 0. */
+bits starting from bit 0. */
 if ((x & (x + 1)) == 0)
 {
 if (first_bit != NULL)
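
(The (x & (x + 1)) == 0 idiom above tests that x is a power of two
minus one, i.e. a mask of consecutive 1 bits starting at bit 0. A
minimal standalone sketch of the same check in plain C, with a
hypothetical low_mask_p helper, not part of the commit:

    #include <stdio.h>
    #include <stdint.h>

    /* True iff X has only consecutive 1 bits starting from bit 0,
       i.e. X is a power of two minus one (0 is the empty mask).  */
    static int
    low_mask_p (uint64_t x)
    {
      return (x & (x + 1)) == 0;
    }

    int
    main (void)
    {
      printf ("%d %d %d\n",
              low_mask_p (0xff),   /* 1: bits 0..7 set */
              low_mask_p (0x0f),   /* 1: bits 0..3 set */
              low_mask_p (0xf0));  /* 0: mask does not start at bit 0 */
      return 0;
    }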
@@ -1480,8 +1480,8 @@ expand_set_cint64 (rtx dest_reg, rtx src_val)
 if (expand_set_cint64_one_inst (temp, r, three_wide_only))
 {
 /* 0xFFFFFFFFFFA5FFFF becomes:
-movei temp, 0xFFFFFFFFFFFFFFA5
-rotli dest, temp, 16 */
+movei temp, 0xFFFFFFFFFFFFFFA5
+rotli dest, temp, 16 */
 emit_move_insn (dest_reg,
 gen_rtx_ROTATE (DImode, temp, GEN_INT (count)));
 return;
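
The comment above shows the trick: the full 64-bit constant is
produced by loading a sign-extended 8-bit immediate and rotating it.
A hedged illustration in plain C (rotl64 is a hypothetical helper
mirroring what the rotli instruction computes, not GCC internals):

    #include <stdio.h>
    #include <stdint.h>

    /* Rotate a 64-bit value left by N bits, as rotli does.  */
    static uint64_t
    rotl64 (uint64_t v, unsigned n)
    {
      n &= 63;
      return n ? (v << n) | (v >> (64 - n)) : v;
    }

    int
    main (void)
    {
      /* movei temp, 0xFFFFFFFFFFFFFFA5 (a sign-extended 8-bit
         immediate), then rotli dest, temp, 16 recreates the wide
         constant.  */
      uint64_t temp = 0xFFFFFFFFFFFFFFA5ULL;
      printf ("%#llx\n", (unsigned long long) rotl64 (temp, 16));
      /* Prints 0xffffffffffa5ffff.  */
      return 0;
    }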
@@ -1530,11 +1530,11 @@ expand_set_cint64 (rtx dest_reg, rtx src_val)
 unsigned HOST_WIDE_INT leftover;
 /* Recursively create the constant above the lowest 16 zero
-bits. */
+bits. */
 expand_set_cint64 (temp, GEN_INT (val >> shift));
 /* See if we can easily insert the remaining bits, or if we need
-to fall through to the more general case. */
+to fall through to the more general case. */
 leftover = val - ((val >> shift) << shift);
 if (leftover == 0)
 {
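
For intuition, leftover above is exactly the low shift bits of val,
the part not covered by the recursively built high part. A small
plain-C sketch, with values chosen only for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int
    main (void)
    {
      /* Split VAL at a 16-bit boundary: the high part is built
         recursively, the leftover low bits are inserted afterwards.  */
      uint64_t val = 0x12345678ABCD0000ULL;
      int shift = 16;
      uint64_t high = val >> shift;
      uint64_t leftover = val - ((val >> shift) << shift);
      printf ("high=%#llx leftover=%#llx\n",
              (unsigned long long) high, (unsigned long long) leftover);
      /* leftover == 0 here, so a single left shift finishes the job.  */
      return 0;
    }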
@@ -1571,8 +1571,8 @@ expand_set_cint64 (rtx dest_reg, rtx src_val)
 else
 {
 /* Set as many high 16-bit blocks as we can with a single
-instruction. We'll insert the remaining 16-bit blocks
-below. */
+instruction. We'll insert the remaining 16-bit blocks
+below. */
 for (shift = 16;; shift += 16)
 {
 gcc_assert (shift < 64);
@@ -1615,10 +1615,10 @@ tilegx_expand_set_const64 (rtx op0, rtx op1)
 if (CONST_INT_P (op1))
 {
 /* TODO: I don't know if we want to split large constants
-now, or wait until later (with a define_split).
+now, or wait until later (with a define_split).
-Does splitting early help CSE? Does it harm other
-optimizations that might fold loads? */
+Does splitting early help CSE? Does it harm other
+optimizations that might fold loads? */
 expand_set_cint64 (op0, op1);
 }
 else
@@ -1716,7 +1716,7 @@ tilegx_expand_unaligned_load (rtx dest_reg, rtx mem, HOST_WIDE_INT bitsize,
 if (bitsize == 2 * BITS_PER_UNIT && (bit_offset % BITS_PER_UNIT) == 0)
 {
 /* When just loading a two byte value, we can load the two bytes
-individually and combine them efficiently. */
+individually and combine them efficiently. */
 mem_lo = adjust_address (mem, QImode, byte_offset);
 mem_hi = adjust_address (mem, QImode, byte_offset + 1);
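
The combine step for this two-byte case is a byte load plus a
shift-and-OR. A standalone sketch, assuming little-endian byte order
as on TILE-Gx (load_u16_unaligned is a hypothetical name):

    #include <stdio.h>
    #include <stdint.h>

    /* Load a 16-bit value from a possibly unaligned address by
       loading the two bytes individually and combining them.  */
    static uint16_t
    load_u16_unaligned (const void *p)
    {
      const uint8_t *b = (const uint8_t *) p;
      return (uint16_t) (b[0] | (b[1] << 8));
    }

    int
    main (void)
    {
      uint8_t buf[3] = { 0x00, 0x34, 0x12 };
      printf ("%#x\n", load_u16_unaligned (buf + 1));  /* prints 0x1234 */
      return 0;
    }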
@@ -2053,6 +2053,7 @@ tilegx_expand_const_muldi (rtx op0, rtx op1, long long multiplier)
 return false;
 }
+/* Expand the muldi pattern. */
 bool
 tilegx_expand_muldi (rtx op0, rtx op1, rtx op2)
@@ -2227,7 +2228,7 @@ tilegx_emit_setcc_internal (rtx res, enum rtx_code code, rtx op0, rtx op1,
 case GEU:
 case GTU:
 /* We do not have these compares, so we reverse the
-operands. */
+operands. */
 swap = true;
 break;
@@ -2322,7 +2323,7 @@ tilegx_emit_cc_test (enum rtx_code code, rtx op0, rtx op1,
 case GEU:
 case GTU:
 /* These must be reversed (except NE, but let's
-canonicalize). */
+canonicalize). */
 code = reverse_condition (code);
 branch_code = EQ;
 break;
@@ -2352,7 +2353,7 @@ tilegx_emit_cc_test (enum rtx_code code, rtx op0, rtx op1,
 || (REG_P (op0) && REG_POINTER (op0))))
 {
 /* TODO: Use a SIMD add immediate to hit zero for tiled
-constants in a single instruction. */
+constants in a single instruction. */
 if (GET_MODE (op0) != DImode)
 {
 /* Convert to DImode so we can use addli. Note that
@@ -3400,47 +3401,47 @@ tilegx_expand_builtin (tree exp,
 opnum = nonvoid;
 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
-{
-const struct insn_operand_data *insn_op;
+{
+const struct insn_operand_data *insn_op;
-if (arg == error_mark_node)
-return NULL_RTX;
-if (opnum > MAX_BUILTIN_ARGS)
-return NULL_RTX;
+if (arg == error_mark_node)
+return NULL_RTX;
+if (opnum > MAX_BUILTIN_ARGS)
+return NULL_RTX;
-insn_op = &insn_data[icode].operand[opnum];
+insn_op = &insn_data[icode].operand[opnum];
-op[opnum] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
+op[opnum] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
-if (!(*insn_op->predicate) (op[opnum], insn_op->mode))
-{
-enum machine_mode opmode = insn_op->mode;
+if (!(*insn_op->predicate) (op[opnum], insn_op->mode))
+{
+enum machine_mode opmode = insn_op->mode;
-/* pointer_operand and pmode_register_operand operands do
-not specify a mode, so use the operand's mode instead
-(which should always be right by the time we get here,
-except for constants, which are VOIDmode). */
-if (opmode == VOIDmode)
-{
-enum machine_mode m = GET_MODE (op[opnum]);
-gcc_assert (m == Pmode || m == VOIDmode);
-opmode = Pmode;
-}
+/* pointer_operand and pmode_register_operand operands do
+not specify a mode, so use the operand's mode instead
+(which should always be right by the time we get here,
+except for constants, which are VOIDmode). */
+if (opmode == VOIDmode)
+{
+enum machine_mode m = GET_MODE (op[opnum]);
+gcc_assert (m == Pmode || m == VOIDmode);
+opmode = Pmode;
+}
-op[opnum] = copy_to_mode_reg (opmode, op[opnum]);
-}
+op[opnum] = copy_to_mode_reg (opmode, op[opnum]);
+}
-if (!(*insn_op->predicate) (op[opnum], insn_op->mode))
-{
-/* We still failed to meet the predicate even after moving
-into a register. Assume we needed an immediate. */
-error_at (EXPR_LOCATION (exp),
-"operand must be an immediate of the right size");
-return const0_rtx;
-}
+if (!(*insn_op->predicate) (op[opnum], insn_op->mode))
+{
+/* We still failed to meet the predicate even after moving
+into a register. Assume we needed an immediate. */
+error_at (EXPR_LOCATION (exp),
+"operand must be an immediate of the right size");
+return const0_rtx;
+}
-opnum++;
-}
+opnum++;
+}
 if (nonvoid)
 {
@@ -3874,7 +3875,7 @@ tilegx_expand_prologue (void)
 REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
 /* fp holds a copy of the incoming sp, in case we need to store
-it. */
+it. */
 sp_copy_regno = HARD_FRAME_POINTER_REGNUM;
 }
 else if (!tilegx_current_function_is_leaf ())
@@ -4069,7 +4070,7 @@ tilegx_expand_epilogue (bool sibcall_p)
 if (frame_pointer_needed)
 {
 /* Restore the old stack pointer by copying from the frame
-pointer. */
+pointer. */
 if (TARGET_32BIT)
 {
 insn = emit_insn (gen_sp_restore_32bit (stack_pointer_rtx,
@@ -4266,6 +4267,7 @@ get_jump_target (rtx branch)
 return 0;
 }
+/* Implement TARGET_SCHED_ADJUST_COST. */
 static int
 tilegx_sched_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
@@ -4311,37 +4313,37 @@ tilegx_gen_bundles (void)
 {
 basic_block bb;
 FOR_EACH_BB (bb)
-{
-rtx insn, next;
-rtx end = NEXT_INSN (BB_END (bb));
+{
+rtx insn, next;
+rtx end = NEXT_INSN (BB_END (bb));
-for (insn = next_insn_to_bundle (BB_HEAD (bb), end); insn; insn = next)
-{
-next = next_insn_to_bundle (NEXT_INSN (insn), end);
+for (insn = next_insn_to_bundle (BB_HEAD (bb), end); insn; insn = next)
+{
+next = next_insn_to_bundle (NEXT_INSN (insn), end);
-/* Never wrap {} around inline asm. */
-if (GET_CODE (PATTERN (insn)) != ASM_INPUT)
-{
-if (next == NULL_RTX || GET_MODE (next) == TImode
-/* NOTE: The scheduler incorrectly believes a call
-insn can execute in the same cycle as the insn
-after the call. This is of course impossible.
-Really we need to fix the scheduler somehow, so
-the code after the call gets scheduled
-optimally. */
-|| CALL_P (insn))
-{
-/* Mark current insn as the end of a bundle. */
-PUT_MODE (insn, QImode);
-}
-else
-{
-/* Mark it as part of a bundle. */
-PUT_MODE (insn, SImode);
-}
-}
-}
-}
+/* Never wrap {} around inline asm. */
+if (GET_CODE (PATTERN (insn)) != ASM_INPUT)
+{
+if (next == NULL_RTX || GET_MODE (next) == TImode
+/* NOTE: The scheduler incorrectly believes a call
+insn can execute in the same cycle as the insn
+after the call. This is of course impossible.
+Really we need to fix the scheduler somehow, so
+the code after the call gets scheduled
+optimally. */
+|| CALL_P (insn))
+{
+/* Mark current insn as the end of a bundle. */
+PUT_MODE (insn, QImode);
+}
+else
+{
+/* Mark it as part of a bundle. */
+PUT_MODE (insn, SImode);
+}
+}
+}
+}
 }
@@ -4906,7 +4908,7 @@ tilegx_print_operand (FILE *file, rtx x, int code)
 switch (code)
 {
 case 'c':
-/* Print the compare operator opcode for conditional moves. */
+/* Print the compare operator opcode for conditional moves. */
 switch (GET_CODE (x))
 {
 case EQ:
@@ -4921,7 +4923,7 @@ tilegx_print_operand (FILE *file, rtx x, int code)
 return;
 case 'C':
-/* Print the compare operator opcode for conditional moves. */
+/* Print the compare operator opcode for conditional moves. */
 switch (GET_CODE (x))
 {
 case EQ:
@@ -4937,7 +4939,7 @@ tilegx_print_operand (FILE *file, rtx x, int code)
 case 'd':
 {
-/* Print the compare operator opcode for conditional moves. */
+/* Print the compare operator opcode for conditional moves. */
 switch (GET_CODE (x))
 {
 case EQ:
@@ -4954,7 +4956,7 @@ tilegx_print_operand (FILE *file, rtx x, int code)
 case 'D':
 {
-/* Print the compare operator opcode for conditional moves. */
+/* Print the compare operator opcode for conditional moves. */
 switch (GET_CODE (x))
 {
 case EQ:
@@ -5196,7 +5198,7 @@ tilegx_print_operand (FILE *file, rtx x, int code)
 case 'r':
 /* In this case we need a register. Use 'zero' if the operand
-is const0_rtx. */
+is const0_rtx. */
 if (x == const0_rtx
 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
 {
@@ -5283,7 +5285,7 @@ tilegx_final_prescan_insn (rtx insn)
 }
-/* While emitting asm, are we currently inside '{' for a bundle? */
+/* While emitting asm, are we currently inside '{' for a bundle? */
 static bool tilegx_in_bundle = false;
 /* Implement ASM_OUTPUT_OPCODE. Prepend/append curly braces as
@@ -5345,7 +5347,7 @@ tilegx_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
 "\t{\n"
 "\tmove\tr10, lr\n"
 "\tjal\t%s\n"
-"\t}\t\n", MCOUNT_NAME);
+"\t}\n", MCOUNT_NAME);
 }
 tilegx_in_bundle = false;
@@ -5458,7 +5460,7 @@ tilegx_file_end (void)
 #undef TARGET_BUILTIN_DECL
 #define TARGET_BUILTIN_DECL tilegx_builtin_decl
-#undef TARGET_EXPAND_BUILTIN
+#undef TARGET_EXPAND_BUILTIN
 #define TARGET_EXPAND_BUILTIN tilegx_expand_builtin
 #undef TARGET_CONDITIONAL_REGISTER_USAGE

gcc/config/tilegx/tilegx.md

@@ -408,7 +408,7 @@
 (ss_minus "")
 (us_minus "")
 ])
 ;; <s> is the load/store extension suffix.
 (define_code_attr s [(zero_extend "u")
 (sign_extend "s")])
@@ -816,11 +816,11 @@
 bit_width = INTVAL (operands[2]);
 bit_offset = INTVAL (operands[3]);
-/* Reject bitfields that can be done with a normal load */
+/* Reject bitfields that can be done with a normal load. */
 if (MEM_ALIGN (operands[1]) >= bit_offset + bit_width)
 FAIL;
-/* The value in memory cannot span more than 8 bytes. */
+/* The value in memory cannot span more than 8 bytes. */
 first_byte_offset = bit_offset / BITS_PER_UNIT;
 last_byte_offset = (bit_offset + bit_width - 1) / BITS_PER_UNIT;
 if (last_byte_offset - first_byte_offset > 7)
@ -845,7 +845,6 @@
HOST_WIDE_INT bit_width = INTVAL (operands[2]);
HOST_WIDE_INT bit_offset = INTVAL (operands[3]);
if (MEM_P (operands[1]))
{
HOST_WIDE_INT first_byte_offset, last_byte_offset;
@@ -853,11 +852,11 @@
 if (GET_MODE (operands[1]) != QImode)
 FAIL;
-/* Reject bitfields that can be done with a normal load */
+/* Reject bitfields that can be done with a normal load. */
 if (MEM_ALIGN (operands[1]) >= bit_offset + bit_width)
 FAIL;
-/* The value in memory cannot span more than 8 bytes. */
+/* The value in memory cannot span more than 8 bytes. */
 first_byte_offset = bit_offset / BITS_PER_UNIT;
 last_byte_offset = (bit_offset + bit_width - 1) / BITS_PER_UNIT;
 if (last_byte_offset - first_byte_offset > 7)
@@ -873,7 +872,7 @@
 if (bit_offset == 0)
 {
-/* Extracting the low bits is just a bitwise AND. */
+/* Extracting the low bits is just a bitwise AND. */
 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << bit_width) - 1;
 emit_insn (gen_anddi3 (operands[0], operands[1], GEN_INT (mask)));
 DONE;
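
The bit_offset == 0 case above extracts the low bit_width bits with a
single AND against ((HOST_WIDE_INT) 1 << bit_width) - 1. A standalone
sketch of the mask logic (plain C; extract_low_bits is a hypothetical
helper):

    #include <stdio.h>
    #include <stdint.h>

    /* Keep the BIT_WIDTH low bits of VAL, clearing the rest.  */
    static uint64_t
    extract_low_bits (uint64_t val, unsigned bit_width)
    {
      uint64_t mask = (bit_width < 64 ? ((uint64_t) 1 << bit_width) : 0) - 1;
      return val & mask;
    }

    int
    main (void)
    {
      printf ("%#llx\n",
              (unsigned long long) extract_low_bits (0xDEADBEEF, 12));
      /* Prints 0xeef.  */
      return 0;
    }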
@@ -891,7 +890,7 @@
 [(set (match_operand:DI 0 "register_operand" "")
 (const:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")]
 UNSPEC_HW2_LAST)))])
 ;; Second step of the 3-insn sequence to materialize a symbolic
 ;; address.
 (define_expand "mov_address_step2"
@@ -947,7 +946,7 @@
 "%1 = . + 8\n\tlnk\t%0"
 [(set_attr "type" "Y1")])
-;; First step of the 3-insn sequence to materialize a position
+;; The next three patterns are used to materialize a position
 ;; independent address by adding the difference of two labels to a
 ;; base label in the text segment, assuming that the difference fits
 ;; in 32 signed bits.
@@ -959,10 +958,6 @@
 UNSPEC_HW1_LAST_PCREL)))]
 "flag_pic")
-;; Second step of the 3-insn sequence to materialize a position
-;; independent address by adding the difference of two labels to a
-;; base label in the text segment, assuming that the difference fits
-;; in 32 signed bits.
 (define_expand "mov_pcrel_step2<bitsuffix>"
 [(set (match_operand:I48MODE 0 "register_operand" "")
 (unspec:I48MODE
@@ -973,11 +968,7 @@
 UNSPEC_HW0_PCREL))]
 UNSPEC_INSN_ADDR_SHL16INSLI))]
 "flag_pic")
-;; Third step of the 3-insn sequence to materialize a position
-;; independent address by adding the difference of two labels to a base
-;; label in the text segment, assuming that the difference fits in 32
-;; signed bits.
 (define_insn "mov_pcrel_step3<bitsuffix>"
 [(set (match_operand:I48MODE 0 "register_operand" "=r")
 (unspec:I48MODE [(match_operand:I48MODE 1 "reg_or_0_operand" "rO")
@@ -1335,7 +1326,6 @@
 DONE;
 })
-
 (define_expand "subdf3"
 [(set (match_operand:DF 0 "register_operand" "")
 (minus:DF (match_operand:DF 1 "register_operand" "")
@@ -1708,7 +1698,6 @@
 "ctz\t%0, %r1"
 [(set_attr "type" "Y0")])
-
 (define_insn "popcount<mode>2"
 [(set (match_operand:I48MODE 0 "register_operand" "=r")
 (popcount:I48MODE (match_operand:DI 1 "reg_or_0_operand" "rO")))]
@@ -1937,7 +1926,7 @@
 (define_insn "*zero_extendsidi_truncdisi"
 [(set (match_operand:DI 0 "register_operand" "=r")
 (zero_extend:DI
-(truncate:SI (match_operand:DI 1 "reg_or_0_operand" "0"))))]
+(truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rO"))))]
 ""
 "v4int_l\t%0, zero, %r1"
 [(set_attr "type" "X01")])
@@ -2008,7 +1997,7 @@
 shruxi\t%0, %r1, %2
 shrux\t%0, %r1, %r2"
 [(set_attr "type" "X01,X01")])
 (define_insn "*lshrsi_truncdisi2"
 [(set (match_operand:SI 0 "register_operand" "=r")
 (lshiftrt:SI
@@ -2213,7 +2202,8 @@
 ;; Loops
 ;;
-;; Define the subtract-one-and-jump insns so loop.c knows what to generate.
+;; Define the subtract-one-and-jump insns so loop.c knows what to
+;; generate.
 (define_expand "doloop_end"
 [(use (match_operand 0 "" "")) ;; loop pseudo
 (use (match_operand 1 "" "")) ;; iterations; zero if unknown
@@ -2481,8 +2471,8 @@
 [(set_attr "type" "*,*,X01")])
 ;; Used for move sp, r52, to pop a stack frame. We need to make sure
-;; that stack frame memory operations have been issued before we do this.
-;; TODO: see above TODO.
+;; that stack frame memory operations have been issued before we do
+;; this. TODO: see above TODO.
 (define_insn "sp_restore<bitsuffix>"
 [(set (match_operand:I48MODE 0 "register_operand" "=r")
 (match_operand:I48MODE 1 "register_operand" "r"))