* server.h (struct emit_ops): Add compare-goto fields.

* tracepoint.c (gdb_agent_op_sizes): New table.
	(emit_eq_goto): New function.
	(emit_ne_goto): New function.
	(emit_lt_goto): New function.
	(emit_le_goto): New function.
	(emit_gt_goto): New function.
	(emit_ge_goto): New function.
	(is_goto_target): New function.
	(compile_bytecodes): Recognize special cases of compare-goto
	combinations and call specialized emitters for them.
	* linux-x86-low.c (amd64_emit_eq_goto): New function.
	(amd64_emit_ne_goto): New function.
	(amd64_emit_lt_goto): New function.
	(amd64_emit_le_goto): New function.
	(amd64_emit_gt_goto): New function.
	(amd64_emit_ge_goto): New function.
	(amd64_emit_ops): Add the new functions.
	(i386_emit_eq_goto): New function.
	(i386_emit_ne_goto): New function.
	(i386_emit_lt_goto): New function.
	(i386_emit_le_goto): New function.
	(i386_emit_gt_goto): New function.
	(i386_emit_ge_goto): New function.
	(i386_emit_ops): Add the new functions.
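
In rough terms, this is a peephole in gdbserver's bytecode-to-native compiler: a tracepoint condition such as $reg0 == 42 reaches the agent as a bytecode sequence along the lines of reg, const, equal, if_goto.  Previously compile_bytecodes emitted each bytecode independently, so emit_equal materialized a 0/1 result on the stack and emit_if_goto then tested it.  With this change, when a comparison is immediately followed by if_goto (possibly through an intervening log_not, and only if no other goto targets the bytecodes being absorbed), the pair is compiled by a single compare-and-branch emitter such as emit_eq_goto; swap/less_signed prefixes are likewise folded into greater-than and less-or-equal branches.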
Stan Shebs 2011-09-15 22:54:13 +00:00
parent 5af65ec072
commit 6b9801d456
4 changed files with 484 additions and 6 deletions

ChangeLog

@@ -1,3 +1,31 @@
2011-09-15  Stan Shebs  <stan@codesourcery.com>

	* server.h (struct emit_ops): Add compare-goto fields.
	* tracepoint.c (gdb_agent_op_sizes): New table.
	(emit_eq_goto): New function.
	(emit_ne_goto): New function.
	(emit_lt_goto): New function.
	(emit_le_goto): New function.
	(emit_gt_goto): New function.
	(emit_ge_goto): New function.
	(is_goto_target): New function.
	(compile_bytecodes): Recognize special cases of compare-goto
	combinations and call specialized emitters for them.
	* linux-x86-low.c (amd64_emit_eq_goto): New function.
	(amd64_emit_ne_goto): New function.
	(amd64_emit_lt_goto): New function.
	(amd64_emit_le_goto): New function.
	(amd64_emit_gt_goto): New function.
	(amd64_emit_ge_goto): New function.
	(amd64_emit_ops): Add the new functions.
	(i386_emit_eq_goto): New function.
	(i386_emit_ne_goto): New function.
	(i386_emit_lt_goto): New function.
	(i386_emit_le_goto): New function.
	(i386_emit_gt_goto): New function.
	(i386_emit_ge_goto): New function.
	(i386_emit_ops): Add the new functions.

2011-09-08  Stan Shebs  <stan@codesourcery.com>

	* linux-x86-low.c (i386_emit_prologue): Save %ebx.

linux-x86-low.c

@@ -1993,6 +1993,127 @@ amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
"pop %rax");
}
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
EMIT_ASM (amd64_eq,
"cmp %rax,(%rsp)\n\t"
"jne .Lamd64_eq_fallthru\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lamd64_eq_fallthru:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax");
if (offset_p)
*offset_p = 13;
if (size_p)
*size_p = 4;
}
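/* A rough byte count for the block above: cmp %rax,(%rsp) assembles to
   4 bytes, the short jne to 2, lea 0x8(%rsp),%rsp to 5, and pop %rax to
   1, so the 0xe9 opcode sits at offset 12 and its 4-byte displacement
   (patched later by the bytecode compiler once the goto target's native
   address is known) starts at offset 13 -- hence *offset_p = 13 and
   *size_p = 4.  The same layout holds for the other amd64 emitters
   below, assuming the assembler keeps choosing the rel8 form for the
   conditional jump to the fallthru label.  */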
void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
EMIT_ASM (amd64_ne,
"cmp %rax,(%rsp)\n\t"
"je .Lamd64_ne_fallthru\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lamd64_ne_fallthru:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax");
if (offset_p)
*offset_p = 13;
if (size_p)
*size_p = 4;
}
void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
EMIT_ASM (amd64_lt,
"cmp %rax,(%rsp)\n\t"
"jnl .Lamd64_lt_fallthru\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lamd64_lt_fallthru:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax");
if (offset_p)
*offset_p = 13;
if (size_p)
*size_p = 4;
}
void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
EMIT_ASM (amd64_le,
"cmp %rax,(%rsp)\n\t"
"jnle .Lamd64_le_fallthru\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lamd64_le_fallthru:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax");
if (offset_p)
*offset_p = 13;
if (size_p)
*size_p = 4;
}
void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
EMIT_ASM (amd64_gt,
"cmp %rax,(%rsp)\n\t"
"jng .Lamd64_gt_fallthru\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lamd64_gt_fallthru:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax");
if (offset_p)
*offset_p = 13;
if (size_p)
*size_p = 4;
}
void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
EMIT_ASM (amd64_ge,
"cmp %rax,(%rsp)\n\t"
"jnge .Lamd64_ge_fallthru\n\t"
".Lamd64_ge_jump:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lamd64_ge_fallthru:\n\t"
"lea 0x8(%rsp),%rsp\n\t"
"pop %rax");
if (offset_p)
*offset_p = 13;
if (size_p)
*size_p = 4;
}
struct emit_ops amd64_emit_ops =
{
amd64_emit_prologue,
@@ -2025,7 +2146,13 @@ struct emit_ops amd64_emit_ops =
amd64_emit_swap,
amd64_emit_stack_adjust,
amd64_emit_int_call_1,
amd64_emit_void_call_2
amd64_emit_void_call_2,
amd64_emit_eq_goto,
amd64_emit_ne_goto,
amd64_emit_lt_goto,
amd64_emit_le_goto,
amd64_emit_gt_goto,
amd64_emit_ge_goto
};
#endif /* __x86_64__ */
@@ -2500,6 +2627,162 @@ i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
"pop %eax");
}
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
EMIT_ASM32 (eq,
/* Check low half first, more likely to be decider */
"cmpl %eax,(%esp)\n\t"
"jne .Leq_fallthru\n\t"
"cmpl %ebx,4(%esp)\n\t"
"jne .Leq_fallthru\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Leq_fallthru:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx");
if (offset_p)
*offset_p = 18;
if (size_p)
*size_p = 4;
}
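/* The 32-bit code keeps the 64-bit top-of-stack value split across
   %eax (low half) and %ebx (high half), with the other operand's halves
   at (%esp) and 4(%esp), which is why equality needs two cmpl
   instructions.  Counting bytes as in the amd64 case: 3 + 2 + 4 + 2 +
   4 + 1 + 1 = 17 before the 0xe9, so the patched displacement starts at
   offset 18.  The ordered comparisons below use a slightly longer
   compare ladder (19 bytes), giving offset 20.  Again this assumes the
   assembler emits rel8 conditional jumps.  */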
void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
EMIT_ASM32 (ne,
/* Check low half first, more likely to be decider */
"cmpl %eax,(%esp)\n\t"
"jne .Lne_jump\n\t"
"cmpl %ebx,4(%esp)\n\t"
"je .Lne_fallthru\n\t"
".Lne_jump:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lne_fallthru:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx");
if (offset_p)
*offset_p = 18;
if (size_p)
*size_p = 4;
}
void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
EMIT_ASM32 (lt,
"cmpl %ebx,4(%esp)\n\t"
"jl .Llt_jump\n\t"
"jne .Llt_fallthru\n\t"
"cmpl %eax,(%esp)\n\t"
"jnl .Llt_fallthru\n\t"
".Llt_jump:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Llt_fallthru:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx");
if (offset_p)
*offset_p = 20;
if (size_p)
*size_p = 4;
}
void
i386_emit_le_goto (int *offset_p, int *size_p)
{
EMIT_ASM32 (le,
"cmpl %ebx,4(%esp)\n\t"
"jle .Lle_jump\n\t"
"jne .Lle_fallthru\n\t"
"cmpl %eax,(%esp)\n\t"
"jnle .Lle_fallthru\n\t"
".Lle_jump:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lle_fallthru:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx");
if (offset_p)
*offset_p = 20;
if (size_p)
*size_p = 4;
}
void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
EMIT_ASM32 (gt,
"cmpl %ebx,4(%esp)\n\t"
"jg .Lgt_jump\n\t"
"jne .Lgt_fallthru\n\t"
"cmpl %eax,(%esp)\n\t"
"jng .Lgt_fallthru\n\t"
".Lgt_jump:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lgt_fallthru:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx");
if (offset_p)
*offset_p = 20;
if (size_p)
*size_p = 4;
}
void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
EMIT_ASM32 (ge,
"cmpl %ebx,4(%esp)\n\t"
"jge .Lge_jump\n\t"
"jne .Lge_fallthru\n\t"
"cmpl %eax,(%esp)\n\t"
"jnge .Lge_fallthru\n\t"
".Lge_jump:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx\n\t"
/* jmp, but don't trust the assembler to choose the right jump */
".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
".Lge_fallthru:\n\t"
"lea 0x8(%esp),%esp\n\t"
"pop %eax\n\t"
"pop %ebx");
if (offset_p)
*offset_p = 20;
if (size_p)
*size_p = 4;
}
struct emit_ops i386_emit_ops =
{
i386_emit_prologue,
@@ -2532,7 +2815,13 @@ struct emit_ops i386_emit_ops =
i386_emit_swap,
i386_emit_stack_adjust,
i386_emit_int_call_1,
i386_emit_void_call_2
i386_emit_void_call_2,
i386_emit_eq_goto,
i386_emit_ne_goto,
i386_emit_lt_goto,
i386_emit_le_goto,
i386_emit_gt_goto,
i386_emit_ge_goto
};

server.h

@@ -538,6 +538,15 @@ struct emit_ops
argument and a 64-bit int from the top of the stack, and returns
nothing (for instance, tsv setter). */
void (*emit_void_call_2) (CORE_ADDR fn, int arg1);
/* Emit code specialized for common combinations of compare followed
by a goto. */
void (*emit_eq_goto) (int *offset_p, int *size_p);
void (*emit_ne_goto) (int *offset_p, int *size_p);
void (*emit_lt_goto) (int *offset_p, int *size_p);
void (*emit_le_goto) (int *offset_p, int *size_p);
void (*emit_gt_goto) (int *offset_p, int *size_p);
void (*emit_ge_goto) (int *offset_p, int *size_p);
};
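/* A sketch of the contract the six new hooks are expected to follow,
   going by their use in tracepoint.c: each one consumes the two
   comparison operands, emits a native compare-and-branch ending in a
   jump with a placeholder 32-bit displacement, and reports via
   *OFFSET_P and *SIZE_P where that displacement lives within the
   emitted block so the caller can patch in the real goto target later.
   For the eq/ne cases compile_bytecodes checks the field for NULL and
   falls back to the separate compare and if_goto emitters when a
   target does not provide the specialized hook.  */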
/* Returns the address of the get_raw_reg function in the IPA. */

tracepoint.c

@@ -483,6 +483,14 @@ static const char *gdb_agent_op_names [gdb_agent_op_last] =
#undef DEFOP
};
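/* Per-bytecode size table built from ax.def, paralleling the name
   table above: the leading 0 covers opcode zero, which is not a real
   bytecode, and each DEFOP contributes its SIZE field.  is_goto_target
   below steps through an expression by 1 + gdb_agent_op_sizes[op]
   bytes per bytecode, so the table is read as the number of operand
   bytes that follow each opcode.  */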
static const unsigned char gdb_agent_op_sizes [gdb_agent_op_last] =
{
0
#define DEFOP(NAME, SIZE, DATA_SIZE, CONSUMED, PRODUCED, VALUE) , SIZE
#include "ax.def"
#undef DEFOP
};
struct agent_expr
{
int length;
@@ -5663,6 +5671,42 @@ emit_void_call_2 (CORE_ADDR fn, int arg1)
target_emit_ops ()->emit_void_call_2 (fn, arg1);
}
static void
emit_eq_goto (int *offset_p, int *size_p)
{
target_emit_ops ()->emit_eq_goto (offset_p, size_p);
}
static void
emit_ne_goto (int *offset_p, int *size_p)
{
target_emit_ops ()->emit_ne_goto (offset_p, size_p);
}
static void
emit_lt_goto (int *offset_p, int *size_p)
{
target_emit_ops ()->emit_lt_goto (offset_p, size_p);
}
static void
emit_ge_goto (int *offset_p, int *size_p)
{
target_emit_ops ()->emit_ge_goto (offset_p, size_p);
}
static void
emit_gt_goto (int *offset_p, int *size_p)
{
target_emit_ops ()->emit_gt_goto (offset_p, size_p);
}
static void
emit_le_goto (int *offset_p, int *size_p)
{
target_emit_ops ()->emit_le_goto (offset_p, size_p);
}
static enum eval_result_type compile_bytecodes (struct agent_expr *aexpr);
static void
@@ -5712,6 +5756,30 @@ compile_tracepoint_condition (struct tracepoint *tpoint,
*jump_entry += 16;
}
/* Scan an agent expression for any evidence that the given PC is the
target of a jump bytecode in the expression. */
int
is_goto_target (struct agent_expr *aexpr, int pc)
{
int i;
unsigned char op;
for (i = 0; i < aexpr->length; i += 1 + gdb_agent_op_sizes[op])
{
op = aexpr->bytes[i];
if (op == gdb_agent_op_goto || op == gdb_agent_op_if_goto)
{
int target = (aexpr->bytes[i + 1] << 8) + aexpr->bytes[i + 2];
if (target == pc)
return 1;
}
}
return 0;
}
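/* Presumably the reason compile_bytecodes consults this before folding
   a comparison into one of the new compare-and-goto emitters: if some
   other goto lands on the if_goto (or log_not) bytecode that would be
   absorbed, that incoming path still needs a native block of its own at
   that pc, with the comparison result materialized on the stack, so the
   combination would be unsafe and the generic emitters are used
   instead.  */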
/* Given an agent expression, turn it into native code. */
static enum eval_result_type
@@ -5719,7 +5787,7 @@ compile_bytecodes (struct agent_expr *aexpr)
{
int pc = 0;
int done = 0;
unsigned char op;
unsigned char op, next_op;
int arg;
/* This is only used to build 64-bit value for constants. */
ULONGEST top;
@@ -5831,11 +5899,64 @@
break;
case gdb_agent_op_equal:
emit_equal ();
next_op = aexpr->bytes[pc];
if (next_op == gdb_agent_op_if_goto
&& !is_goto_target (aexpr, pc)
&& target_emit_ops ()->emit_eq_goto)
{
trace_debug ("Combining equal & if_goto");
pc += 1;
aentry->pc = pc;
arg = aexpr->bytes[pc++];
arg = (arg << 8) + aexpr->bytes[pc++];
aentry->goto_pc = arg;
emit_eq_goto (&(aentry->from_offset), &(aentry->from_size));
}
else if (next_op == gdb_agent_op_log_not
&& (aexpr->bytes[pc + 1] == gdb_agent_op_if_goto)
&& !is_goto_target (aexpr, pc + 1)
&& target_emit_ops ()->emit_ne_goto)
{
trace_debug ("Combining equal & log_not & if_goto");
pc += 2;
aentry->pc = pc;
arg = aexpr->bytes[pc++];
arg = (arg << 8) + aexpr->bytes[pc++];
aentry->goto_pc = arg;
emit_ne_goto (&(aentry->from_offset), &(aentry->from_size));
}
else
emit_equal ();
break;
case gdb_agent_op_less_signed:
emit_less_signed ();
next_op = aexpr->bytes[pc];
if (next_op == gdb_agent_op_if_goto
&& !is_goto_target (aexpr, pc))
{
trace_debug ("Combining less_signed & if_goto");
pc += 1;
aentry->pc = pc;
arg = aexpr->bytes[pc++];
arg = (arg << 8) + aexpr->bytes[pc++];
aentry->goto_pc = arg;
emit_lt_goto (&(aentry->from_offset), &(aentry->from_size));
}
else if (next_op == gdb_agent_op_log_not
&& !is_goto_target (aexpr, pc)
&& (aexpr->bytes[pc + 1] == gdb_agent_op_if_goto)
&& !is_goto_target (aexpr, pc + 1))
{
trace_debug ("Combining less_signed & log_not & if_goto");
pc += 2;
aentry->pc = pc;
arg = aexpr->bytes[pc++];
arg = (arg << 8) + aexpr->bytes[pc++];
aentry->goto_pc = arg;
emit_ge_goto (&(aentry->from_offset), &(aentry->from_size));
}
else
emit_less_signed ();
break;
case gdb_agent_op_less_unsigned:
@@ -5946,7 +6067,38 @@
break;
case gdb_agent_op_swap:
emit_swap ();
next_op = aexpr->bytes[pc];
/* Detect greater-than comparison sequences. */
if (next_op == gdb_agent_op_less_signed
&& !is_goto_target (aexpr, pc)
&& (aexpr->bytes[pc + 1] == gdb_agent_op_if_goto)
&& !is_goto_target (aexpr, pc + 1))
{
trace_debug ("Combining swap & less_signed & if_goto");
pc += 2;
aentry->pc = pc;
arg = aexpr->bytes[pc++];
arg = (arg << 8) + aexpr->bytes[pc++];
aentry->goto_pc = arg;
emit_gt_goto (&(aentry->from_offset), &(aentry->from_size));
}
else if (next_op == gdb_agent_op_less_signed
&& !is_goto_target (aexpr, pc)
&& (aexpr->bytes[pc + 1] == gdb_agent_op_log_not)
&& !is_goto_target (aexpr, pc + 1)
&& (aexpr->bytes[pc + 2] == gdb_agent_op_if_goto)
&& !is_goto_target (aexpr, pc + 2))
{
trace_debug ("Combining swap & less_signed & log_not & if_goto");
pc += 3;
aentry->pc = pc;
arg = aexpr->bytes[pc++];
arg = (arg << 8) + aexpr->bytes[pc++];
aentry->goto_pc = arg;
emit_le_goto (&(aentry->from_offset), &(aentry->from_size));
}
else
emit_swap ();
break;
case gdb_agent_op_getv: