Support displaced stepping in aarch64-linux

This patch adds support for displaced stepping on aarch64-linux.  A
visitor is implemented for displaced stepping, and used to record the
information needed to fix up the PC after the displaced step, if
necessary.  Some emit_* functions are converted to macros and moved
to arch/aarch64-insn.{c,h} so that they can be shared.
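As an illustration of the mechanism, here is an editorial sketch (not part
of the patch; names and addresses are invented) of how the pieces cooperate:
copy_insn relocates the instruction to a scratch pad and records a PC
adjustment in a closure, the thread single-steps in the scratch pad, and
fixup maps the resulting PC back relative to the original address.

#include <assert.h>
#include <stdint.h>

struct closure { int32_t pc_adjust; };

/* Model the "others" path: a plain ALU instruction is copied verbatim,
   and afterwards the PC must be moved past the original instruction.  */
static struct closure
copy_insn (uint64_t from, uint64_t to)
{
  struct closure c = { 4 };
  return c;
}

static void
fixup (const struct closure *dsc, uint64_t from, uint64_t *pc)
{
  if (dsc->pc_adjust != 0)
    *pc = from + dsc->pc_adjust;
}

int
main (void)
{
  uint64_t from = 0x400000, to = 0x500000;
  struct closure dsc = copy_insn (from, to);
  uint64_t pc = to + 4;      /* Single step executed in the scratch pad.  */

  fixup (&dsc, from, &pc);
  assert (pc == from + 4);   /* Resume just after the original insn.  */
  return 0;
}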

gdb:

2015-10-12  Yao Qi  <yao.qi@linaro.org>

	* aarch64-linux-tdep.c: Include arch-utils.h.
	(aarch64_linux_init_abi): Call set_gdbarch_max_insn_length,
	set_gdbarch_displaced_step_copy_insn,
	set_gdbarch_displaced_step_fixup,
	set_gdbarch_displaced_step_free_closure,
	set_gdbarch_displaced_step_location,
	and set_gdbarch_displaced_step_hw_singlestep.
	* aarch64-tdep.c (struct displaced_step_closure): New.
	(struct aarch64_displaced_step_data): New.
	(aarch64_displaced_step_b): New function.
	(aarch64_displaced_step_b_cond): Likewise.
	(aarch64_register): Likewise.
	(aarch64_displaced_step_cb): Likewise.
	(aarch64_displaced_step_tb): Likewise.
	(aarch64_displaced_step_adr): Likewise.
	(aarch64_displaced_step_ldr_literal): Likewise.
	(aarch64_displaced_step_others): Likewise.
	(aarch64_displaced_step_copy_insn): Likewise.
	(aarch64_displaced_step_fixup): Likewise.
	(aarch64_displaced_step_hw_singlestep): Likewise.
	* aarch64-tdep.h (DISPLACED_MODIFIED_INSNS): New macro.
	(aarch64_displaced_step_copy_insn): Declare.
	(aarch64_displaced_step_fixup): Declare.
	(aarch64_displaced_step_hw_singlestep): Declare.
	* arch/aarch64-insn.c (emit_insn): Moved from
	gdbserver/linux-aarch64-low.c.
	(emit_load_store): Likewise.
	* arch/aarch64-insn.h (enum aarch64_opcodes): Moved from
	gdbserver/linux-aarch64-low.c.
	(struct aarch64_register): Likewise.
	(struct aarch64_memory_operand): Likewise.
	(ENCODE): Likewise.
	(can_encode_int32): New macro.
	(emit_b, emit_bcond, emit_cb, emit_ldr, emit_ldrsw): Likewise.
	(emit_tb, emit_nop): Likewise.
	(emit_insn): Declare.
	(emit_load_store): Declare.

gdb/gdbserver:

2015-10-12  Yao Qi  <yao.qi@linaro.org>

	* linux-aarch64-low.c (enum aarch64_opcodes): Move to
	arch/aarch64-insn.h.
	(struct aarch64_memory_operand): Likewise.
	(ENCODE): Likewise.
	(emit_insn): Move to arch/aarch64-insn.c.
	(emit_b, emit_bcond, emit_cb, emit_tb): Remove.
	(emit_load_store): Move to arch/aarch64-insn.c.
	(emit_ldr, emit_ldrb, emit_ldrsw, emit_nop): Remove.
	(can_encode_int32): Remove.
gdb/aarch64-linux-tdep.c

@@ -21,6 +21,7 @@
#include "defs.h"
#include "gdbarch.h"
#include "arch-utils.h"
#include "glibc-tdep.h"
#include "linux-tdep.h"
#include "aarch64-tdep.h"
@@ -1151,6 +1152,17 @@ aarch64_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
/* `catch syscall' */
set_xml_syscall_file_name (gdbarch, "syscalls/aarch64-linux.xml");
set_gdbarch_get_syscall_number (gdbarch, aarch64_linux_get_syscall_number);
/* Displaced stepping. */
set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
set_gdbarch_displaced_step_copy_insn (gdbarch,
aarch64_displaced_step_copy_insn);
set_gdbarch_displaced_step_fixup (gdbarch, aarch64_displaced_step_fixup);
set_gdbarch_displaced_step_free_closure (gdbarch,
simple_displaced_step_free_closure);
set_gdbarch_displaced_step_location (gdbarch, linux_displaced_step_location);
set_gdbarch_displaced_step_hw_singlestep (gdbarch,
aarch64_displaced_step_hw_singlestep);
}
/* Provide a prototype to silence -Wmissing-prototypes. */

gdb/aarch64-tdep.c

@@ -2559,6 +2559,343 @@ aarch64_software_single_step (struct frame_info *frame)
return 1;
}
struct displaced_step_closure
{
/* It is true when a conditional branch instruction, such as B.COND
or TBZ, is being displaced stepped.  */
int cond;
/* PC adjustment offset after displaced stepping. */
int32_t pc_adjust;
};
/* Data when visiting instructions for displaced stepping. */
struct aarch64_displaced_step_data
{
struct aarch64_insn_data base;
/* The address at which the instruction will be executed.  */
CORE_ADDR new_addr;
/* Buffer of instructions to be copied to NEW_ADDR to execute. */
uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
/* Number of instructions in INSN_BUF. */
unsigned insn_count;
/* Registers when doing displaced stepping. */
struct regcache *regs;
struct displaced_step_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b". */
static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
if (can_encode_int32 (new_offset, 28))
{
/* Emit B rather than BL, because executing BL at the scratch-pad
address would put the wrong return address in LR.  To avoid this,
we emit B, and update LR ourselves if the instruction is BL.  */
emit_b (dsd->insn_buf, 0, new_offset);
dsd->insn_count++;
}
else
{
/* Write NOP. */
emit_nop (dsd->insn_buf);
dsd->insn_count++;
dsd->dsc->pc_adjust = offset;
}
if (is_bl)
{
/* Update LR. */
regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
data->insn_addr + 4);
}
}
/* Implementation of aarch64_insn_visitor method "b_cond". */
static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
/* GDB has to fix up the PC differently after displaced stepping
this instruction, according to whether the condition was true or
false.  Instead of checking COND against the condition flags, we
can emit the following instructions, and GDB can tell how to fix
up the PC from the PC value alone.
B.COND TAKEN ; If cond is true, then jump to TAKEN.
INSN1 ;
TAKEN:
INSN2
*/
emit_bcond (dsd->insn_buf, cond, 8);
dsd->dsc->cond = 1;
dsd->dsc->pc_adjust = offset;
dsd->insn_count = 1;
}
/* Construct an aarch64_register operand.  If the register is known
statically, it should be defined as a named constant instead of being
built with this helper function.  */
static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
return (struct aarch64_register) { num, is64 };
}
/* Implementation of aarch64_insn_visitor method "cb". */
static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
const unsigned rn, int is64,
struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
/* The offset is out of range for a compare and branch
instruction. We can use the following instructions instead:
CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
INSN1 ;
TAKEN:
INSN2
*/
emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
dsd->insn_count = 1;
dsd->dsc->cond = 1;
dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "tb". */
static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
const unsigned rt, unsigned bit,
struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
/* The offset is out of range for a test bit and branch
instruction.  We can use the following instructions instead:
TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
INSN1 ;
TAKEN:
INSN2
*/
emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
dsd->insn_count = 1;
dsd->dsc->cond = 1;
dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "adr". */
static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
const int is_adrp, struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
/* We know exactly the address the ADR{P,} instruction will compute.
We can just write it to the destination register. */
CORE_ADDR address = data->insn_addr + offset;
if (is_adrp)
{
/* Clear the lower 12 bits of the address to get its 4K page.  */
regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
address & ~0xfff);
}
else
regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
address);
dsd->dsc->pc_adjust = 4;
emit_nop (dsd->insn_buf);
dsd->insn_count = 1;
}
/* Implementation of aarch64_insn_visitor method "ldr_literal". */
static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
const unsigned rt, const int is64,
struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
CORE_ADDR address = data->insn_addr + offset;
struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
address);
if (is_sw)
dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
aarch64_register (rt, 1), zero);
else
dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
aarch64_register (rt, 1), zero);
dsd->dsc->pc_adjust = 4;
}
/* Implementation of aarch64_insn_visitor method "others". */
static void
aarch64_displaced_step_others (const uint32_t insn,
struct aarch64_insn_data *data)
{
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
emit_insn (dsd->insn_buf, insn);
dsd->insn_count = 1;
if ((insn & 0xfffffc1f) == 0xd65f0000)
{
/* RET */
dsd->dsc->pc_adjust = 0;
}
else
dsd->dsc->pc_adjust = 4;
}
static const struct aarch64_insn_visitor visitor =
{
aarch64_displaced_step_b,
aarch64_displaced_step_b_cond,
aarch64_displaced_step_cb,
aarch64_displaced_step_tb,
aarch64_displaced_step_adr,
aarch64_displaced_step_ldr_literal,
aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method. */
struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
{
struct displaced_step_closure *dsc = NULL;
enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
struct aarch64_displaced_step_data dsd;
/* Look for a Load Exclusive instruction which begins the sequence. */
if (decode_masked_match (insn, 0x3fc00000, 0x08400000))
{
/* We can't displaced step atomic sequences. */
return NULL;
}
dsc = XCNEW (struct displaced_step_closure);
dsd.base.insn_addr = from;
dsd.new_addr = to;
dsd.regs = regs;
dsd.dsc = dsc;
aarch64_relocate_instruction (insn, &visitor,
(struct aarch64_insn_data *) &dsd);
gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
if (dsd.insn_count != 0)
{
int i;
/* The instruction can be relocated to the scratch pad.  Copy the
relocated instruction(s) there.  */
for (i = 0; i < dsd.insn_count; i++)
{
if (debug_displaced)
{
debug_printf ("displaced: writing insn ");
debug_printf ("%.8x", dsd.insn_buf[i]);
debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
}
write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
(ULONGEST) dsd.insn_buf[i]);
}
}
else
{
xfree (dsc);
dsc = NULL;
}
return dsc;
}
/* Implement the "displaced_step_fixup" gdbarch method. */
void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
struct displaced_step_closure *dsc,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
{
if (dsc->cond)
{
ULONGEST pc;
regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
if (pc - to == 8)
{
/* Condition is true. */
}
else if (pc - to == 4)
{
/* Condition is false. */
dsc->pc_adjust = 4;
}
else
gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
}
if (dsc->pc_adjust != 0)
{
if (debug_displaced)
{
debug_printf ("displaced: fixup: set PC to %s:%d\n",
paddress (gdbarch, from), dsc->pc_adjust);
}
regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
from + dsc->pc_adjust);
}
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
struct displaced_step_closure *closure)
{
return 1;
}
/* Initialize the current architecture based on INFO. If possible,
re-use an architecture from ARCHES, which is a list of
architectures already created during this debugging session.
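Here is a standalone model (an editorial sketch with invented addresses) of
the B.COND fixup above: the scratch pad holds a single B.COND +8, so after
the step the PC is TO + 8 if the condition held and TO + 4 otherwise, and
the fixup translates that back to FROM + offset or FROM + 4.

#include <assert.h>
#include <stdint.h>

static uint64_t
fixup_bcond (uint64_t from, uint64_t to, uint64_t pc, int32_t offset)
{
  if (pc - to == 8)
    return from + offset;   /* Condition true: branch taken.  */
  else if (pc - to == 4)
    return from + 4;        /* Condition false: fall through.  */
  return pc;                /* Unreachable in a well-formed step.  */
}

int
main (void)
{
  uint64_t from = 0x400000, to = 0x500000;

  assert (fixup_bcond (from, to, to + 8, 0x40) == from + 0x40);
  assert (fixup_bcond (from, to, to + 4, 0x40) == from + 4);
  return 0;
}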

gdb/aarch64-tdep.h

@@ -69,6 +69,10 @@ enum aarch64_regnum
/* Total number of general (X) registers. */
#define AARCH64_X_REGISTER_COUNT 32
/* The maximum number of modified instructions generated for one
single-stepped instruction. */
#define DISPLACED_MODIFIED_INSNS 1
/* Target-dependent structure in gdbarch. */
struct gdbarch_tdep
{
@@ -98,4 +102,17 @@ extern struct target_desc *tdesc_aarch64;
extern int aarch64_process_record (struct gdbarch *gdbarch,
struct regcache *regcache, CORE_ADDR addr);
struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs);
void aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
struct displaced_step_closure *dsc,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs);
int aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
struct displaced_step_closure *closure);
#endif /* aarch64-tdep.h */
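As a self-contained check of the offset recomputation used by
aarch64_displaced_step_b above (addresses are invented): relocating a branch
from FROM to TO must preserve its absolute target, and the result only
encodes if it still fits in 28 signed bits (imm26 << 2).

#include <assert.h>
#include <stdint.h>

/* Local copy of the can_encode_int32 macro from arch/aarch64-insn.h;
   it relies on >> being an arithmetic shift for negative values.  */
#define CAN_ENCODE_INT32(val, bits) \
  (((val) >> (bits)) == 0 || ((val) >> (bits)) == -1)

int
main (void)
{
  int64_t from = 0x400000, to = 0x500000;
  int32_t offset = 0x1000;                    /* Original "B #0x1000".  */
  int32_t new_offset = (int32_t) (from - to) + offset;

  assert (from + offset == to + new_offset);  /* Same absolute target.  */
  assert (CAN_ENCODE_INT32 (new_offset, 28)); /* Fits B's imm26 << 2.  */
  return 0;
}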

gdb/arch/aarch64-insn.c

@@ -328,3 +328,61 @@ aarch64_relocate_instruction (uint32_t insn,
else
visitor->others (insn, data);
}
/* Write a 32-bit unsigned integer INSN into *BUF.  Return the number of
instructions written (i.e., 1).  */
int
emit_insn (uint32_t *buf, uint32_t insn)
{
*buf = insn;
return 1;
}
/* Helper function emitting a load or store instruction. */
int
emit_load_store (uint32_t *buf, uint32_t size,
enum aarch64_opcodes opcode,
struct aarch64_register rt,
struct aarch64_register rn,
struct aarch64_memory_operand operand)
{
uint32_t op;
switch (operand.type)
{
case MEMORY_OPERAND_OFFSET:
{
op = ENCODE (1, 1, 24);
return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
| ENCODE (operand.index >> 3, 12, 10)
| ENCODE (rn.num, 5, 5)
| ENCODE (rt.num, 5, 0));
}
case MEMORY_OPERAND_POSTINDEX:
{
uint32_t post_index = ENCODE (1, 2, 10);
op = ENCODE (0, 1, 24);
return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
| post_index | ENCODE (operand.index, 9, 12)
| ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
case MEMORY_OPERAND_PREINDEX:
{
uint32_t pre_index = ENCODE (3, 2, 10);
op = ENCODE (0, 1, 24);
return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
| pre_index | ENCODE (operand.index, 9, 12)
| ENCODE (rn.num, 5, 5)
| ENCODE (rt.num, 5, 0));
}
default:
return 0;
}
}
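To sanity-check the MEMORY_OPERAND_OFFSET arm above, a standalone program
(assumptions: local copies of ENCODE and the LDR opcode from
arch/aarch64-insn.h; the expected value is the architectural encoding of
"ldr x0, [x0]"):

#include <assert.h>
#include <stdint.h>

#define ENCODE(val, size, offset) \
  ((uint32_t) (((val) & ((1ULL << (size)) - 1)) << (offset)))

#define LDR 0x38400000u   /* STR (0x38000000) | 0x00400000.  */

int
main (void)
{
  uint32_t size = 3;      /* 64-bit register -> size field 0b11.  */
  uint32_t rt = 0, rn = 0, index = 0;
  uint32_t insn = LDR | ENCODE (size, 2, 30) | ENCODE (1, 1, 24)
                  | ENCODE (index >> 3, 12, 10)
                  | ENCODE (rn, 5, 5) | ENCODE (rt, 5, 0);

  assert (insn == 0xf9400000);  /* ldr x0, [x0]  */
  return 0;
}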

gdb/arch/aarch64-insn.h

@@ -21,6 +21,129 @@
extern int aarch64_debug;
/* List of opcodes that we need for building the jump pad and relocating
an instruction. */
enum aarch64_opcodes
{
/* B 0001 01ii iiii iiii iiii iiii iiii iiii */
/* BL 1001 01ii iiii iiii iiii iiii iiii iiii */
/* B.COND 0101 0100 iiii iiii iiii iiii iii0 cccc */
/* CBZ s011 0100 iiii iiii iiii iiii iiir rrrr */
/* CBNZ s011 0101 iiii iiii iiii iiii iiir rrrr */
/* TBZ b011 0110 bbbb biii iiii iiii iiir rrrr */
/* TBNZ b011 0111 bbbb biii iiii iiii iiir rrrr */
B = 0x14000000,
BL = 0x80000000 | B,
BCOND = 0x40000000 | B,
CBZ = 0x20000000 | B,
CBNZ = 0x21000000 | B,
TBZ = 0x36000000 | B,
TBNZ = 0x37000000 | B,
/* BLR 1101 0110 0011 1111 0000 00rr rrr0 0000 */
BLR = 0xd63f0000,
/* RET 1101 0110 0101 1111 0000 00rr rrr0 0000 */
RET = 0xd65f0000,
/* STP s010 100o o0ii iiii irrr rrrr rrrr rrrr */
/* LDP s010 100o o1ii iiii irrr rrrr rrrr rrrr */
/* STP (SIMD&VFP) ss10 110o o0ii iiii irrr rrrr rrrr rrrr */
/* LDP (SIMD&VFP) ss10 110o o1ii iiii irrr rrrr rrrr rrrr */
STP = 0x28000000,
LDP = 0x28400000,
STP_SIMD_VFP = 0x04000000 | STP,
LDP_SIMD_VFP = 0x04000000 | LDP,
/* STR ss11 100o 00xi iiii iiii xxrr rrrr rrrr */
/* LDR ss11 100o 01xi iiii iiii xxrr rrrr rrrr */
/* LDRSW 1011 100o 10xi iiii iiii xxrr rrrr rrrr */
STR = 0x38000000,
LDR = 0x00400000 | STR,
LDRSW = 0x80800000 | STR,
/* LDAXR ss00 1000 0101 1111 1111 11rr rrrr rrrr */
LDAXR = 0x085ffc00,
/* STXR ss00 1000 000r rrrr 0111 11rr rrrr rrrr */
STXR = 0x08007c00,
/* STLR ss00 1000 1001 1111 1111 11rr rrrr rrrr */
STLR = 0x089ffc00,
/* MOV s101 0010 1xxi iiii iiii iiii iiir rrrr */
/* MOVK s111 0010 1xxi iiii iiii iiii iiir rrrr */
MOV = 0x52800000,
MOVK = 0x20000000 | MOV,
/* ADD s00o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
/* SUB s10o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
/* SUBS s11o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
ADD = 0x01000000,
SUB = 0x40000000 | ADD,
SUBS = 0x20000000 | SUB,
/* AND s000 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
/* ORR s010 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
/* ORN s010 1010 xx1x xxxx xxxx xxxx xxxx xxxx */
/* EOR s100 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
AND = 0x0a000000,
ORR = 0x20000000 | AND,
ORN = 0x00200000 | ORR,
EOR = 0x40000000 | AND,
/* LSLV s001 1010 110r rrrr 0010 00rr rrrr rrrr */
/* LSRV s001 1010 110r rrrr 0010 01rr rrrr rrrr */
/* ASRV s001 1010 110r rrrr 0010 10rr rrrr rrrr */
LSLV = 0x1ac02000,
LSRV = 0x00000400 | LSLV,
ASRV = 0x00000800 | LSLV,
/* SBFM s001 0011 0nii iiii iiii iirr rrrr rrrr */
SBFM = 0x13000000,
/* UBFM s101 0011 0nii iiii iiii iirr rrrr rrrr */
UBFM = 0x40000000 | SBFM,
/* CSINC s001 1010 100r rrrr cccc 01rr rrrr rrrr */
CSINC = 0x9a800400,
/* MUL s001 1011 000r rrrr 0111 11rr rrrr rrrr */
MUL = 0x1b007c00,
/* MSR (register) 1101 0101 0001 oooo oooo oooo ooor rrrr */
/* MRS 1101 0101 0011 oooo oooo oooo ooor rrrr */
MSR = 0xd5100000,
MRS = 0x00200000 | MSR,
/* HINT 1101 0101 0000 0011 0010 oooo ooo1 1111 */
HINT = 0xd503201f,
SEVL = (5 << 5) | HINT,
WFE = (2 << 5) | HINT,
NOP = (0 << 5) | HINT,
};
/* Representation of a general purpose register of the form xN or wN.
This type is used by emitting functions that take registers as operands. */
struct aarch64_register
{
unsigned num;
int is64;
};
/* Representation of a memory operand, used for load and store
instructions.
The types correspond to the following variants:
MEMORY_OPERAND_OFFSET: LDR rt, [rn, #offset]
MEMORY_OPERAND_PREINDEX: LDR rt, [rn, #index]!
MEMORY_OPERAND_POSTINDEX: LDR rt, [rn], #index */
struct aarch64_memory_operand
{
/* Type of the operand. */
enum
{
MEMORY_OPERAND_OFFSET,
MEMORY_OPERAND_PREINDEX,
MEMORY_OPERAND_POSTINDEX,
} type;
/* Index from the base register. */
int32_t index;
};
/* Helper macro to mask and shift a value into a bitfield. */
#define ENCODE(val, size, offset) \
((uint32_t) ((val & ((1ULL << size) - 1)) << offset))
int aarch64_decode_adr (CORE_ADDR addr, uint32_t insn, int *is_adrp,
unsigned *rd, int32_t *offset);
@@ -86,4 +209,113 @@ void aarch64_relocate_instruction (uint32_t insn,
const struct aarch64_insn_visitor *visitor,
struct aarch64_insn_data *data);
#define can_encode_int32(val, bits) \
(((val) >> (bits)) == 0 || ((val) >> (bits)) == -1)
/* Write a B or BL instruction into *BUF.
B #offset
BL #offset
IS_BL specifies if the link register should be updated.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 128MB (26 bits << 2). */
#define emit_b(buf, is_bl, offset) \
emit_insn (buf, ((is_bl) ? BL : B) | (ENCODE ((offset) >> 2, 26, 0)))
/* Write a BCOND instruction into *BUF.
B.COND #offset
COND specifies the condition field.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 1MB (19 bits << 2). */
#define emit_bcond(buf, cond, offset) \
emit_insn (buf, \
BCOND | ENCODE ((offset) >> 2, 19, 5) \
| ENCODE ((cond), 4, 0))
/* Write a CBZ or CBNZ instruction into *BUF.
CBZ rt, #offset
CBNZ rt, #offset
IS_CBNZ distinguishes between CBZ and CBNZ instructions.
RT is the register to test.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 1MB (19 bits << 2). */
#define emit_cb(buf, is_cbnz, rt, offset) \
emit_insn (buf, \
((is_cbnz) ? CBNZ : CBZ) \
| ENCODE (rt.is64, 1, 31) /* sf */ \
| ENCODE (offset >> 2, 19, 5) /* imm19 */ \
| ENCODE (rt.num, 5, 0))
/* Write a LDR instruction into *BUF.
LDR rt, [rn, #offset]
LDR rt, [rn, #index]!
LDR rt, [rn], #index
RT is the register to load into.
RN is the base address register.
OFFSET is the immediate to add to the base address. It is limited to
0 .. 32760 range (12 bits << 3). */
#define emit_ldr(buf, rt, rn, operand) \
emit_load_store (buf, rt.is64 ? 3 : 2, LDR, rt, rn, operand)
/* Write a LDRSW instruction into *BUF. The register size is 64-bit.
LDRSW xt, [rn, #offset]
LDRSW xt, [rn, #index]!
LDRSW xt, [rn], #index
RT is the register to load into.
RN is the base address register.
OFFSET is the immediate to add to the base address. It is limited to
0 .. 16380 range (12 bits << 2). */
#define emit_ldrsw(buf, rt, rn, operand) \
emit_load_store (buf, 3, LDRSW, rt, rn, operand)
/* Write a TBZ or TBNZ instruction into *BUF.
TBZ rt, #bit, #offset
TBNZ rt, #bit, #offset
IS_TBNZ distinguishes between TBZ and TBNZ instructions.
RT is the register to test.
BIT is the index of the bit to test in register RT.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 32KB (14 bits << 2). */
#define emit_tb(buf, is_tbnz, bit, rt, offset) \
emit_insn (buf, \
((is_tbnz) ? TBNZ: TBZ) \
| ENCODE (bit >> 5, 1, 31) /* b5 */ \
| ENCODE (bit, 5, 19) /* b40 */ \
| ENCODE (offset >> 2, 14, 5) /* imm14 */ \
| ENCODE (rt.num, 5, 0))
/* Write a NOP instruction into *BUF. */
#define emit_nop(buf) emit_insn (buf, NOP)
int emit_insn (uint32_t *buf, uint32_t insn);
int emit_load_store (uint32_t *buf, uint32_t size,
enum aarch64_opcodes opcode,
struct aarch64_register rt,
struct aarch64_register rn,
struct aarch64_memory_operand operand);
#endif
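Similarly, a standalone check of the emit_b macro's encoding (assumptions:
local copies of ENCODE and the B/BL opcodes): "b #8" assembles to
0x14000002 and "bl #8" to 0x94000002, i.e. imm26 = 8 >> 2.

#include <assert.h>
#include <stdint.h>

#define ENCODE(val, size, offset) \
  ((uint32_t) (((val) & ((1ULL << (size)) - 1)) << (offset)))

#define B  0x14000000u
#define BL (0x80000000u | B)

/* Same expansion as the emit_b macro, without the buffer write.  */
#define EMIT_B(is_bl, offset) \
  (((is_bl) ? BL : B) | ENCODE ((offset) >> 2, 26, 0))

int
main (void)
{
  assert (EMIT_B (0, 8) == 0x14000002);  /* b  #8 */
  assert (EMIT_B (1, 8) == 0x94000002);  /* bl #8 */
  return 0;
}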

gdb/gdbserver/linux-aarch64-low.c

@@ -584,92 +584,6 @@ aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
return 0;
}
/* List of opcodes that we need for building the jump pad and relocating
an instruction. */
enum aarch64_opcodes
{
/* B 0001 01ii iiii iiii iiii iiii iiii iiii */
/* BL 1001 01ii iiii iiii iiii iiii iiii iiii */
/* B.COND 0101 0100 iiii iiii iiii iiii iii0 cccc */
/* CBZ s011 0100 iiii iiii iiii iiii iiir rrrr */
/* CBNZ s011 0101 iiii iiii iiii iiii iiir rrrr */
/* TBZ b011 0110 bbbb biii iiii iiii iiir rrrr */
/* TBNZ b011 0111 bbbb biii iiii iiii iiir rrrr */
B = 0x14000000,
BL = 0x80000000 | B,
BCOND = 0x40000000 | B,
CBZ = 0x20000000 | B,
CBNZ = 0x21000000 | B,
TBZ = 0x36000000 | B,
TBNZ = 0x37000000 | B,
/* BLR 1101 0110 0011 1111 0000 00rr rrr0 0000 */
BLR = 0xd63f0000,
/* RET 1101 0110 0101 1111 0000 00rr rrr0 0000 */
RET = 0xd65f0000,
/* STP s010 100o o0ii iiii irrr rrrr rrrr rrrr */
/* LDP s010 100o o1ii iiii irrr rrrr rrrr rrrr */
/* STP (SIMD&VFP) ss10 110o o0ii iiii irrr rrrr rrrr rrrr */
/* LDP (SIMD&VFP) ss10 110o o1ii iiii irrr rrrr rrrr rrrr */
STP = 0x28000000,
LDP = 0x28400000,
STP_SIMD_VFP = 0x04000000 | STP,
LDP_SIMD_VFP = 0x04000000 | LDP,
/* STR ss11 100o 00xi iiii iiii xxrr rrrr rrrr */
/* LDR ss11 100o 01xi iiii iiii xxrr rrrr rrrr */
/* LDRSW 1011 100o 10xi iiii iiii xxrr rrrr rrrr */
STR = 0x38000000,
LDR = 0x00400000 | STR,
LDRSW = 0x80800000 | STR,
/* LDAXR ss00 1000 0101 1111 1111 11rr rrrr rrrr */
LDAXR = 0x085ffc00,
/* STXR ss00 1000 000r rrrr 0111 11rr rrrr rrrr */
STXR = 0x08007c00,
/* STLR ss00 1000 1001 1111 1111 11rr rrrr rrrr */
STLR = 0x089ffc00,
/* MOV s101 0010 1xxi iiii iiii iiii iiir rrrr */
/* MOVK s111 0010 1xxi iiii iiii iiii iiir rrrr */
MOV = 0x52800000,
MOVK = 0x20000000 | MOV,
/* ADD s00o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
/* SUB s10o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
/* SUBS s11o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
ADD = 0x01000000,
SUB = 0x40000000 | ADD,
SUBS = 0x20000000 | SUB,
/* AND s000 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
/* ORR s010 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
/* ORN s010 1010 xx1x xxxx xxxx xxxx xxxx xxxx */
/* EOR s100 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
AND = 0x0a000000,
ORR = 0x20000000 | AND,
ORN = 0x00200000 | ORR,
EOR = 0x40000000 | AND,
/* LSLV s001 1010 110r rrrr 0010 00rr rrrr rrrr */
/* LSRV s001 1010 110r rrrr 0010 01rr rrrr rrrr */
/* ASRV s001 1010 110r rrrr 0010 10rr rrrr rrrr */
LSLV = 0x1ac02000,
LSRV = 0x00000400 | LSLV,
ASRV = 0x00000800 | LSLV,
/* SBFM s001 0011 0nii iiii iiii iirr rrrr rrrr */
SBFM = 0x13000000,
/* UBFM s101 0011 0nii iiii iiii iirr rrrr rrrr */
UBFM = 0x40000000 | SBFM,
/* CSINC s001 1010 100r rrrr cccc 01rr rrrr rrrr */
CSINC = 0x9a800400,
/* MUL s001 1011 000r rrrr 0111 11rr rrrr rrrr */
MUL = 0x1b007c00,
/* MSR (register) 1101 0101 0001 oooo oooo oooo ooor rrrr */
/* MRS 1101 0101 0011 oooo oooo oooo ooor rrrr */
MSR = 0xd5100000,
MRS = 0x00200000 | MSR,
/* HINT 1101 0101 0000 0011 0010 oooo ooo1 1111 */
HINT = 0xd503201f,
SEVL = (5 << 5) | HINT,
WFE = (2 << 5) | HINT,
NOP = (0 << 5) | HINT,
};
/* List of condition codes that we need. */
enum aarch64_condition_codes
@@ -683,16 +597,6 @@ enum aarch64_condition_codes
LE = 0xd,
};
/* Representation of a general purpose register of the form xN or wN.
This type is used by emitting functions that take registers as operands. */
struct aarch64_register
{
unsigned num;
int is64;
};
/* Representation of an operand. At this time, it only supports register
and immediate types. */
@@ -779,28 +683,6 @@ immediate_operand (uint32_t imm)
return operand;
}
/* Representation of a memory operand, used for load and store
instructions.
The types correspond to the following variants:
MEMORY_OPERAND_OFFSET: LDR rt, [rn, #offset]
MEMORY_OPERAND_PREINDEX: LDR rt, [rn, #index]!
MEMORY_OPERAND_POSTINDEX: LDR rt, [rn], #index */
struct aarch64_memory_operand
{
/* Type of the operand. */
enum
{
MEMORY_OPERAND_OFFSET,
MEMORY_OPERAND_PREINDEX,
MEMORY_OPERAND_POSTINDEX,
} type;
/* Index from the base register. */
int32_t index;
};
/* Helper function to create an offset memory operand.
For example:
@@ -852,108 +734,6 @@ enum aarch64_system_control_registers
TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
/* Helper macro to mask and shift a value into a bitfield. */
#define ENCODE(val, size, offset) \
((uint32_t) ((val & ((1ULL << size) - 1)) << offset))
/* Write a 32-bit unsigned integer INSN into *BUF.  Return the number of
instructions written (i.e., 1).  */
static int
emit_insn (uint32_t *buf, uint32_t insn)
{
*buf = insn;
return 1;
}
/* Write a B or BL instruction into *BUF.
B #offset
BL #offset
IS_BL specifies if the link register should be updated.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 128MB (26 bits << 2). */
static int
emit_b (uint32_t *buf, int is_bl, int32_t offset)
{
uint32_t imm26 = ENCODE (offset >> 2, 26, 0);
if (is_bl)
return emit_insn (buf, BL | imm26);
else
return emit_insn (buf, B | imm26);
}
/* Write a BCOND instruction into *BUF.
B.COND #offset
COND specifies the condition field.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 1MB (19 bits << 2). */
static int
emit_bcond (uint32_t *buf, unsigned cond, int32_t offset)
{
return emit_insn (buf, BCOND | ENCODE (offset >> 2, 19, 5)
| ENCODE (cond, 4, 0));
}
/* Write a CBZ or CBNZ instruction into *BUF.
CBZ rt, #offset
CBNZ rt, #offset
IS_CBNZ distinguishes between CBZ and CBNZ instructions.
RT is the register to test.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 1MB (19 bits << 2). */
static int
emit_cb (uint32_t *buf, int is_cbnz, struct aarch64_register rt,
int32_t offset)
{
uint32_t imm19 = ENCODE (offset >> 2, 19, 5);
uint32_t sf = ENCODE (rt.is64, 1, 31);
if (is_cbnz)
return emit_insn (buf, CBNZ | sf | imm19 | ENCODE (rt.num, 5, 0));
else
return emit_insn (buf, CBZ | sf | imm19 | ENCODE (rt.num, 5, 0));
}
/* Write a TBZ or TBNZ instruction into *BUF.
TBZ rt, #bit, #offset
TBNZ rt, #bit, #offset
IS_TBNZ distinguishes between TBZ and TBNZ instructions.
RT is the register to test.
BIT is the index of the bit to test in register RT.
OFFSET is the immediate offset from the current PC. It is
byte-addressed but should be 4 bytes aligned. It has a limited range of
+/- 32KB (14 bits << 2). */
static int
emit_tb (uint32_t *buf, int is_tbnz, unsigned bit,
struct aarch64_register rt, int32_t offset)
{
uint32_t imm14 = ENCODE (offset >> 2, 14, 5);
uint32_t b40 = ENCODE (bit, 5, 19);
uint32_t b5 = ENCODE (bit >> 5, 1, 31);
if (is_tbnz)
return emit_insn (buf, TBNZ | b5 | b40 | imm14 | ENCODE (rt.num, 5, 0));
else
return emit_insn (buf, TBZ | b5 | b40 | imm14 | ENCODE (rt.num, 5, 0));
}
/* Write a BLR instruction into *BUF.
BLR rn
@@ -1100,70 +880,9 @@ emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
uint32_t pre_index = ENCODE (1, 1, 24);
return emit_insn (buf, STP_SIMD_VFP | opc | pre_index
| ENCODE (offset >> 4, 7, 15) | ENCODE (rt2, 5, 10)
| ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Helper function emitting a load or store instruction. */
static int
emit_load_store (uint32_t *buf, uint32_t size, enum aarch64_opcodes opcode,
struct aarch64_register rt, struct aarch64_register rn,
struct aarch64_memory_operand operand)
{
uint32_t op;
switch (operand.type)
{
case MEMORY_OPERAND_OFFSET:
{
op = ENCODE (1, 1, 24);
return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
| ENCODE (operand.index >> 3, 12, 10)
| ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
case MEMORY_OPERAND_POSTINDEX:
{
uint32_t post_index = ENCODE (1, 2, 10);
op = ENCODE (0, 1, 24);
return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
| post_index | ENCODE (operand.index, 9, 12)
| ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
case MEMORY_OPERAND_PREINDEX:
{
uint32_t pre_index = ENCODE (3, 2, 10);
op = ENCODE (0, 1, 24);
return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
| pre_index | ENCODE (operand.index, 9, 12)
| ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
default:
return 0;
}
}
/* Write a LDR instruction into *BUF.
LDR rt, [rn, #offset]
LDR rt, [rn, #index]!
LDR rt, [rn], #index
RT is the register to load into.
RN is the base address register.
OFFSET is the immediate to add to the base address. It is limited to
0 .. 32760 range (12 bits << 3). */
static int
emit_ldr (uint32_t *buf, struct aarch64_register rt,
struct aarch64_register rn, struct aarch64_memory_operand operand)
{
return emit_load_store (buf, rt.is64 ? 3 : 2, LDR, rt, rn, operand);
}
/* Write a LDRH instruction into *BUF.
@@ -1204,24 +923,7 @@ emit_ldrb (uint32_t *buf, struct aarch64_register rt,
return emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a LDRSW instruction into *BUF. The register size is 64-bit.
LDRSW xt, [rn, #offset]
LDRSW xt, [rn, #index]!
LDRSW xt, [rn], #index
RT is the register to load into.
RN is the base address register.
OFFSET is the immediate to add to the base address. It is limited to
0 .. 16380 range (12 bits << 2). */
static int
emit_ldrsw (uint32_t *buf, struct aarch64_register rt,
struct aarch64_register rn,
struct aarch64_memory_operand operand)
{
return emit_load_store (buf, 3, LDRSW, rt, rn, operand);
}
/* Write a STR instruction into *BUF.
@@ -1816,14 +1518,6 @@ emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
/* Write a NOP instruction into *BUF. */
static int
emit_nop (uint32_t *buf)
{
return emit_insn (buf, NOP);
}
/* Write LEN instructions from BUF into the inferior memory at *TO.
Note instructions are always little endian on AArch64, unlike data. */
@@ -1849,17 +1543,6 @@ append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
*to += byte_len;
}
/* Helper function. Return 1 if VAL can be encoded in BITS bits. */
static int
can_encode_int32 (int32_t val, unsigned bits)
{
/* This must be an arithmetic shift.  */
int32_t rest = val >> bits;
return rest == 0 || rest == -1;
}
/* Sub-class of struct aarch64_insn_data, store information of
instruction relocation for fast tracepoint. Visitor can
relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save