config.gcc (sh-*): Add sh-mem.o to extra_objs.
2013-10-25  Christian Bruel  <christian.bruel@st.com>

	* config.gcc (sh-*): Add sh-mem.o to extra_objs.
	* gcc/config/sh/t-sh (sh-mem.o): New rule.
	* gcc/config/sh/sh-mem.cc (expand_block_move): Moved here.
	(sh_expand_cmpstr): New function.
	* gcc/config/sh/sh.c (force_into, expand_block_move): Move to sh-mem.cc.
	* gcc/config/sh/sh-protos.h (sh_expand_cmpstr): Declare.
	* gcc/config/sh/sh.md (cmpstrsi, cmpstr_t): New patterns.
	(rotlhi3_8): Rename.

From-SVN: r204013
commit 8e701300d2 (parent f28aa681d3)
gcc/ChangeLog:
@@ -1,3 +1,14 @@
+2013-10-25  Christian Bruel  <christian.bruel@st.com>
+
+	* config.gcc (sh-*): Add sh-mem.o to extra_objs.
+	* gcc/config/sh/t-sh (sh-mem.o): New rule.
+	* gcc/config/sh/sh-mem.cc (expand_block_move): Moved here.
+	(sh_expand_cmpstr): New function.
+	* gcc/config/sh/sh.c (force_into, expand_block_move): Move to sh-mem.cc.
+	* gcc/config/sh/sh-protos.h (sh_expand_cmpstr): Declare.
+	* gcc/config/sh/sh.md (cmpstrsi, cmpstr_t): New patterns.
+	(rotlhi3_8): Rename.
+
 2013-10-24  Jan-Benedict Glaw  <jbglaw@lug-owl.de>
 
 	* configure.ac (ZW_PROG_COMPILER_DEPENDENCIES): Use CXX instead of CC.
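[Editor's note: for context, a hypothetical example (not part of the commit) of the source-level construct the new patterns target. When not optimizing for size, GCC for SH can now expand a strcmp call inline through the new cmpstrsi expander instead of emitting a library call:

#include <string.h>

/* A call like this can now be expanded inline by the cmpstrsi pattern
   (the expander bails out under optimize_insn_for_size_p).  */
int
same_key (const char *a, const char *b)
{
  return strcmp (a, b) == 0;
}
]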
config.gcc:
@@ -465,7 +465,7 @@ sh[123456789lbe]*-*-* | sh-*-*)
 	cpu_type=sh
 	need_64bit_hwint=yes
 	extra_options="${extra_options} fused-madd.opt"
-	extra_objs="${extra_objs} sh_treg_combine.o"
+	extra_objs="${extra_objs} sh_treg_combine.o sh-mem.o"
 	;;
 v850*-*-*)
 	cpu_type=v850
gcc/config/sh/sh-mem.cc (new file, 307 lines)
@@ -0,0 +1,307 @@
/* Helper routines for memory move and comparison insns.

   Copyright (C) 2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "machmode.h"
#include "rtl.h"
#include "tree.h"
#include "expr.h"
#include "tm_p.h"
#include "basic-block.h"

/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */
bool
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (CONST_INT_P (operands[2]));
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  if (! constp)
    return false;

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))
    {
      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
	 since movua can only load into r0, it's kind of
	 pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
      int copied = 0;

      while (copied + 4 <= bytes)
	{
	  rtx to = adjust_address (dest, SImode, copied);
	  rtx from = adjust_automodify_address (src, BLKmode,
						src_addr, copied);

	  set_mem_size (from, 4);
	  emit_insn (gen_movua (temp, from));
	  emit_move_insn (src_addr, plus_constant (Pmode, src_addr, 4));
	  emit_move_insn (to, temp);
	  copied += 4;
	}

      if (copied < bytes)
	move_by_pieces (adjust_address (dest, BLKmode, copied),
			adjust_automodify_address (src, BLKmode,
						   src_addr, copied),
			bytes - copied, align, 0);

      return true;
    }

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))
    return false;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
	return false;
      else if (bytes == 12)
	{
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);

	  function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);
	  emit_insn (gen_block_move_real_i4 (func_addr_rtx));
	  return true;
	}
      else if (! optimize_size)
	{
	  const char *entry_name;
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  int dwords;
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);
	  rtx r6 = gen_rtx_REG (SImode, 6);

	  entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
	  function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);

	  dwords = bytes >> 3;
	  emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
	  emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
	  return true;
	}
      else
	return false;
    }
  if (bytes < 64)
    {
      char entry[30];
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return true;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! optimize_size)
    {
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
	 for each 64 bytes moved.  Then the negative bit left over is used
	 as an index into a list of move instructions.  e.g., a 72 byte move
	 would be set up with size(r6) = 14, for one iteration through the
	 big while loop, and a switch of -2 for the last part.  */

      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return true;
    }

  return false;
}
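[Editor's note: the r6 encoding in the __movmem case above can be sanity-checked with a few lines of plain C arithmetic. An illustrative sketch (not part of the commit), reproducing the 72-byte example from the comment:

#include <stdio.h>

/* The __movmem sfunc consumes 16 from r6 per 64-byte loop iteration;
   the leftover selects an entry in the trailing move list.  */
int
main (void)
{
  int bytes = 72;
  int final_switch = 16 - ((bytes / 4) % 16);   /* 16 - (18 % 16) = 14 */
  int while_loop = ((bytes / 4) / 16 - 1) * 16; /* (18 / 16 - 1) * 16 = 0 */
  printf ("r6 = %d\n", while_loop + final_switch);  /* prints 14 */
  return 0;
}
]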

/* Emit code to perform a strcmp.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the first string.
   OPERANDS[2] is the second string.
   OPERANDS[3] is the known alignment.  */
bool
sh_expand_cmpstr (rtx *operands)
{
  rtx s1 = copy_rtx (operands[1]);
  rtx s2 = copy_rtx (operands[2]);
  rtx s1_addr = copy_addr_to_reg (XEXP (s1, 0));
  rtx s2_addr = copy_addr_to_reg (XEXP (s2, 0));
  rtx tmp0 = gen_reg_rtx (SImode);
  rtx tmp1 = gen_reg_rtx (SImode);
  rtx tmp2 = gen_reg_rtx (SImode);
  rtx tmp3 = gen_reg_rtx (SImode);

  rtx L_return = gen_label_rtx ();
  rtx L_loop_byte = gen_label_rtx ();
  rtx L_end_loop_byte = gen_label_rtx ();
  rtx L_loop_long = gen_label_rtx ();
  rtx L_end_loop_long = gen_label_rtx ();

  rtx jump, addr1, addr2;
  int prob_unlikely = REG_BR_PROB_BASE / 10;
  int prob_likely = REG_BR_PROB_BASE / 4;

  emit_insn (gen_iorsi3 (tmp1, s1_addr, s2_addr));
  emit_move_insn (tmp0, GEN_INT (3));

  emit_insn (gen_tstsi_t (tmp0, tmp1));

  emit_move_insn (tmp0, const0_rtx);

  jump = emit_jump_insn (gen_branch_false (L_loop_byte));
  add_int_reg_note (jump, REG_BR_PROB, prob_likely);

  addr1 = adjust_automodify_address (s1, SImode, s1_addr, 0);
  addr2 = adjust_automodify_address (s2, SImode, s2_addr, 0);

  /* tmp2 is aligned, OK to load.  */
  emit_move_insn (tmp3, addr2);
  emit_move_insn (s2_addr, plus_constant (Pmode, s2_addr, 4));

  /* Start the long-word loop.  */
  emit_label (L_loop_long);

  emit_move_insn (tmp2, tmp3);

  /* tmp1 is aligned, OK to load.  */
  emit_move_insn (tmp1, addr1);
  emit_move_insn (s1_addr, plus_constant (Pmode, s1_addr, 4));

  /* Is there a zero byte?  */
  emit_insn (gen_andsi3 (tmp3, tmp3, tmp1));

  emit_insn (gen_cmpstr_t (tmp0, tmp3));
  jump = emit_jump_insn (gen_branch_true (L_end_loop_long));
  add_int_reg_note (jump, REG_BR_PROB, prob_unlikely);

  emit_insn (gen_cmpeqsi_t (tmp1, tmp2));

  /* tmp2 is aligned, OK to load.  */
  emit_move_insn (tmp3, addr2);
  emit_move_insn (s2_addr, plus_constant (Pmode, s2_addr, 4));

  jump = emit_jump_insn (gen_branch_true (L_loop_long));
  add_int_reg_note (jump, REG_BR_PROB, prob_likely);
  /* End of the long-word loop.  */

  /* Fall through: check which of the words is greater.  */
  if (TARGET_LITTLE_ENDIAN)
    {
      rtx low_1 = gen_lowpart (HImode, tmp1);
      rtx low_2 = gen_lowpart (HImode, tmp2);

      emit_insn (gen_rotlhi3_8 (low_1, low_1));
      emit_insn (gen_rotlhi3_8 (low_2, low_2));
      emit_insn (gen_rotlsi3_16 (tmp1, tmp1));
      emit_insn (gen_rotlsi3_16 (tmp2, tmp2));
      emit_insn (gen_rotlhi3_8 (low_1, low_1));
      emit_insn (gen_rotlhi3_8 (low_2, low_2));
    }

  jump = emit_jump_insn (gen_jump_compact (L_return));
  emit_barrier_after (jump);

  /* Start of the byte loop.  */
  addr1 = adjust_automodify_address (s1, QImode, s1_addr, 0);
  addr2 = adjust_automodify_address (s2, QImode, s2_addr, 0);

  emit_label (L_end_loop_long);

  emit_move_insn (s1_addr, plus_constant (Pmode, s1_addr, -4));
  emit_move_insn (s2_addr, plus_constant (Pmode, s2_addr, -4));

  emit_label (L_loop_byte);

  emit_insn (gen_extendqisi2 (tmp2, addr2));
  emit_move_insn (s2_addr, plus_constant (Pmode, s2_addr, 1));

  emit_insn (gen_extendqisi2 (tmp1, addr1));
  emit_move_insn (s1_addr, plus_constant (Pmode, s1_addr, 1));

  emit_insn (gen_cmpeqsi_t (tmp2, const0_rtx));
  jump = emit_jump_insn (gen_branch_true (L_end_loop_byte));
  add_int_reg_note (jump, REG_BR_PROB, prob_unlikely);

  emit_insn (gen_cmpeqsi_t (tmp1, tmp2));
  jump = emit_jump_insn (gen_branch_true (L_loop_byte));
  add_int_reg_note (jump, REG_BR_PROB, prob_likely);
  /* End of the byte loop.  */

  emit_label (L_end_loop_byte);

  emit_insn (gen_zero_extendqisi2 (tmp2, gen_lowpart (QImode, tmp2)));
  emit_insn (gen_zero_extendqisi2 (tmp1, gen_lowpart (QImode, tmp1)));

  emit_label (L_return);

  emit_insn (gen_subsi3 (operands[0], tmp1, tmp2));

  return true;
}
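[Editor's note: read as plain C, the sequence sh_expand_cmpstr emits is roughly the following. An illustrative sketch only (function name invented; the real code computes the result directly from the differing words, with the byte rotations shown above on little-endian targets, rather than re-scanning them):

#include <stdint.h>
#include <string.h>

/* Compare a word at a time while both pointers are 4-byte aligned;
   fall back to a byte loop when the words differ or when w1 & w2 may
   contain a NUL byte.  The cmp/str-style test on w1 & w2 is
   conservative; false positives are resolved by the byte loop.  */
static int
strcmp_sketch (const char *s1, const char *s2)
{
  if ((((uintptr_t) s1 | (uintptr_t) s2) & 3) == 0)
    for (;;)
      {
	uint32_t w1, w2, m;
	memcpy (&w1, s1, 4);
	memcpy (&w2, s2, 4);
	/* cmp/str 0,(w1 & w2): T set if any byte lane of w1 & w2 is 0.  */
	m = w1 & w2;
	if (!((m & 0xFF000000u) && (m & 0x00FF0000u)
	      && (m & 0x0000FF00u) && (m & 0x000000FFu))
	    || w1 != w2)
	  break;
	s1 += 4;
	s2 += 4;
      }

  /* Byte loop; also the entry point for unaligned operands.  */
  for (;;)
    {
      unsigned char c1 = (unsigned char) *s1++;
      unsigned char c2 = (unsigned char) *s2++;
      if (c2 == 0 || c1 != c2)
	return c1 - c2;
    }
}
]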
gcc/config/sh/sh-protos.h:
@@ -116,6 +116,7 @@ extern void emit_df_insn (rtx);
 extern void output_pic_addr_const (FILE *, rtx);
 extern bool expand_block_move (rtx *);
 extern void prepare_move_operands (rtx[], enum machine_mode mode);
+extern bool sh_expand_cmpstr (rtx *);
 extern enum rtx_code prepare_cbranch_operands (rtx *, enum machine_mode mode,
					       enum rtx_code comparison);
 extern void expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int);
gcc/config/sh/sh.c:
@@ -174,7 +174,6 @@ static bool shmedia_space_reserved_for_target_registers;
 
 static void split_branches (rtx);
 static int branch_dest (rtx);
-static void force_into (rtx, rtx);
 static void print_slot (rtx);
 static rtx add_constant (rtx, enum machine_mode, rtx);
 static void dump_table (rtx, rtx);

@@ -1621,157 +1620,6 @@ sh_encode_section_info (tree decl, rtx rtl, int first)
   SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
 }
 
-[151 deleted lines elided here: the force_into and expand_block_move
- definitions shown above, moved verbatim from sh.c to sh-mem.cc.]
 
 /* Prepare operands for a move define_expand; specifically, one of the
    operands must be in a register.  */
 void
gcc/config/sh/sh.md:
@@ -31,9 +31,6 @@
 ;; ??? The MAC.W and MAC.L instructions are not supported.  There is no
 ;; way to generate them.
 
-;; ??? The cmp/str instruction is not supported.  Perhaps it can be used
-;; for a str* inline function.
-
 ;; BSR is not generated by the compiler proper, but when relaxing, it
 ;; generates .uses pseudo-ops that allow linker relaxation to create
 ;; BSR.  This is actually implemented in bfd/{coff,elf32}-sh.c

@@ -4037,7 +4034,7 @@ label:
   DONE;
 })
 
-(define_insn "*rotlhi3_8"
+(define_insn "rotlhi3_8"
   [(set (match_operand:HI 0 "arith_reg_dest" "=r")
	(rotate:HI (match_operand:HI 1 "arith_reg_operand" "r")
		   (const_int 8)))]

@@ -11912,6 +11909,40 @@ label:
   "jsr	@%0%#"
   [(set_attr "type" "sfunc")
    (set_attr "needs_delay_slot" "yes")])
 
+;; Byte compare pattern:
+;;   temp = a ^ b;
+;;   T = !((temp & 0xFF000000) && (temp & 0x00FF0000)
+;;         && (temp & 0x0000FF00) && (temp & 0x000000FF))
+(define_insn "cmpstr_t"
+  [(set (reg:SI T_REG)
+	(eq:SI (and:SI
+		(and:SI
+		 (and:SI
+		  (zero_extract:SI (xor:SI (match_operand:SI 0 "arith_reg_operand" "r")
+					   (match_operand:SI 1 "arith_reg_operand" "r"))
+				   (const_int 8) (const_int 0))
+		  (zero_extract:SI (xor:SI (match_dup 0) (match_dup 1))
+				   (const_int 8) (const_int 8)))
+		 (zero_extract:SI (xor:SI (match_dup 0) (match_dup 1))
+				  (const_int 8) (const_int 16)))
+		(zero_extract:SI (xor:SI (match_dup 0) (match_dup 1))
+				 (const_int 8) (const_int 24))) (const_int 0)))]
+  "TARGET_SH1"
+  "cmp/str	%0,%1"
+  [(set_attr "type" "mt_group")])
+
+(define_expand "cmpstrsi"
+  [(set (match_operand:SI 0 "register_operand")
+	(compare:SI (match_operand:BLK 1 "memory_operand")
+		    (match_operand:BLK 2 "memory_operand")))
+   (use (match_operand 3 "immediate_operand"))]
+  "TARGET_SH1"
+{
+  if (! optimize_insn_for_size_p () && sh_expand_cmpstr (operands))
+    DONE;
+  else
+    FAIL;
+})
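[Editor's note: the cmpstr_t pattern's RTL condition is easier to read as a C predicate. A minimal sketch (hypothetical function name, not from the commit):

#include <stdint.h>

/* T-bit result of "cmp/str a,b": 1 when any of the four byte lanes of
   a and b are equal, i.e. when a ^ b contains a zero byte.  */
static int
cmpstr_t_bit (uint32_t a, uint32_t b)
{
  uint32_t temp = a ^ b;
  return !((temp & 0xFF000000u) && (temp & 0x00FF0000u)
	   && (temp & 0x0000FF00u) && (temp & 0x000000FFu));
}
]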
 
 ;; -------------------------------------------------------------------------
 ;; Floating point instructions.
gcc/config/sh/t-sh:
@@ -16,6 +16,10 @@
 # along with GCC; see the file COPYING3.  If not see
 # <http://www.gnu.org/licenses/>.
 
+sh-mem.o: $(srcdir)/config/sh/sh-mem.cc \
+  $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TM_H) $(TM_P_H)
+	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
 sh-c.o: $(srcdir)/config/sh/sh-c.c \
   $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TM_H) $(TM_P_H) coretypes.h
	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \