tcg-arm: Define TCG_TARGET_INSN_UNIT_SIZE

And use tcg pointer differencing functions as appropriate.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Richard Henderson, 2014-04-24 14:23:40 -07:00
commit 267c931985 (parent abce5964be)
2 changed files with 55 additions and 96 deletions
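
The diff below relies on the tcg_insn_unit type and the pointer-differencing helpers introduced by the parent commit (abce5964be). A minimal self-contained sketch of those helpers, with TCGContext cut down to the two fields involved:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t tcg_insn_unit;      /* TCG_TARGET_INSN_UNIT_SIZE == 4 */

    typedef struct TCGContext {
        tcg_insn_unit *code_buf;         /* start of the code buffer */
        tcg_insn_unit *code_ptr;         /* current output position */
    } TCGContext;

    /* Byte distance between two code pointers. */
    static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
    {
        return (char *)a - (char *)b;
    }

    /* Byte distance from the current output position to TARGET. */
    static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
    {
        return tcg_ptr_byte_diff(target, s->code_ptr);
    }

    /* Bytes emitted so far. */
    static inline size_t tcg_current_code_size(TCGContext *s)
    {
        return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
    }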

--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c

@@ -115,36 +115,18 @@ static const int tcg_target_call_oarg_regs[2] = {
 #define TCG_REG_TMP TCG_REG_R12
 
-static inline void reloc_abs32(void *code_ptr, intptr_t target)
-{
-    *(uint32_t *) code_ptr = target;
-}
-
-static inline void reloc_pc24(void *code_ptr, intptr_t target)
+static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
 {
-    uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);
-
-    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
-                             | (offset & 0xffffff);
+    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
+    *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
 }
 
-static void patch_reloc(uint8_t *code_ptr, int type,
+static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                         intptr_t value, intptr_t addend)
 {
-    switch (type) {
-    case R_ARM_ABS32:
-        reloc_abs32(code_ptr, value);
-        break;
-
-    case R_ARM_CALL:
-    case R_ARM_JUMP24:
-    default:
-        tcg_abort();
-
-    case R_ARM_PC24:
-        reloc_pc24(code_ptr, value);
-        break;
-    }
+    assert(type == R_ARM_PC24);
+    assert(addend == 0);
+    reloc_pc24(code_ptr, (tcg_insn_unit *)value);
 }
 
 #define TCG_CT_CONST_ARM 0x100
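
The new reloc_pc24() keeps the pre-existing encoding: an ARM branch stores a signed 24-bit offset, counted in 32-bit words, relative to PC+8. A standalone illustration of the patching arithmetic (a sketch mirroring the patch, not QEMU code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t tcg_insn_unit;

    /* Same arithmetic as the patched reloc_pc24() above. */
    static void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
    {
        ptrdiff_t offset = (((char *)target - (char *)code_ptr) - 8) >> 2;
        *code_ptr = (*code_ptr & ~0xffffffu) | (offset & 0xffffff);
    }

    int main(void)
    {
        tcg_insn_unit buf[8] = { 0xea000000 };  /* B with a zero offset field */

        /* Retarget the branch at buf[0] to buf[4], i.e. +16 bytes. */
        reloc_pc24(&buf[0], &buf[4]);

        /* Encoded offset: (16 - 8) >> 2 == 2 words past PC+8. */
        assert((buf[0] & 0xffffff) == 2);
        printf("patched insn: 0x%08x\n", (unsigned)buf[0]);
        return 0;
    }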
@@ -379,20 +361,18 @@ static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
 static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
 {
-    /* We pay attention here to not modify the branch target by skipping
+    /* We pay attention here to not modify the branch target by masking
        the corresponding bytes. This ensure that caches and memory are
        kept coherent during retranslation. */
-    s->code_ptr += 3;
-    tcg_out8(s, (cond << 4) | 0x0a);
+    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0a));
 }
 
 static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
 {
-    /* We pay attention here to not modify the branch target by skipping
+    /* We pay attention here to not modify the branch target by masking
        the corresponding bytes. This ensure that caches and memory are
        kept coherent during retranslation. */
-    s->code_ptr += 3;
-    tcg_out8(s, (cond << 4) | 0x0b);
+    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0b));
 }
 
 static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
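
The old tcg_out_*_noaddr() skipped three bytes and emitted only the condition/opcode byte; with 32-bit insn units the code instead reads the word back and rewrites just the top byte. deposit32() is QEMU's bitfield-insert helper from include/qemu/bitops.h; a sketch of its behavior:

    #include <assert.h>
    #include <stdint.h>

    /* Insert the low LENGTH bits of FIELDVAL into VALUE at bit START. */
    static inline uint32_t deposit32(uint32_t value, int start, int length,
                                     uint32_t fieldval)
    {
        uint32_t mask;
        assert(start >= 0 && length > 0 && length <= 32 - start);
        mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

So deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0a) preserves bits 0..23, where a previous relocation may already have stored the branch target, and the whole insn goes out as a single aligned store.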
@@ -1010,20 +990,21 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
  * with the code buffer limited to 16MB we wouldn't need the long case.
  * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
  */
-static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
+static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
 {
-    int32_t disp = addr - (tcg_target_long) s->code_ptr;
+    intptr_t addri = (intptr_t)addr;
+    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
 
-    if ((addr & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
+    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
         tcg_out_b(s, cond, disp);
         return;
     }
 
-    tcg_out_movi32(s, cond, TCG_REG_TMP, addr);
+    tcg_out_movi32(s, cond, TCG_REG_TMP, addri);
     if (use_armv5t_instructions) {
         tcg_out_bx(s, cond, TCG_REG_TMP);
     } else {
-        if (addr & 1) {
+        if (addri & 1) {
             tcg_abort();
         }
         tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
@@ -1032,28 +1013,28 @@ static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
 /* The call case is mostly used for helpers - so it's not unreasonable
  * for them to be beyond branch range */
-static inline void tcg_out_call(TCGContext *s, uint32_t addr)
+static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
 {
-    int32_t val;
+    intptr_t addri = (intptr_t)addr;
+    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
 
-    val = addr - (tcg_target_long) s->code_ptr;
-    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
-        if (addr & 1) {
+    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
+        if (addri & 1) {
             /* Use BLX if the target is in Thumb mode */
             if (!use_armv5t_instructions) {
                 tcg_abort();
             }
-            tcg_out_blx_imm(s, val);
+            tcg_out_blx_imm(s, disp);
         } else {
-            tcg_out_bl(s, COND_AL, val);
+            tcg_out_bl(s, COND_AL, disp);
         }
     } else if (use_armv7_instructions) {
-        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
+        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
         tcg_out_blx(s, COND_AL, TCG_REG_TMP);
     } else {
         tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
         tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
-        tcg_out32(s, addr);
+        tcg_out32(s, addri);
     }
 }
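
In both tcg_out_goto() and tcg_out_call() the `disp - 8` bias accounts for ARM reading PC as the insn address plus 8; the usable displacement must fit the signed 24-bit word offset of B/BL, roughly +/-32MB. The same test, restated standalone (tcg_out_goto() uses the slightly tighter 0x01fffffd bound to leave room for the bias):

    #include <stdbool.h>
    #include <stddef.h>

    /* True if DISP (byte distance from the branch insn to the target) fits
     * the signed 24-bit word offset of an ARM B/BL, after the PC+8 bias. */
    static bool branch_displacement_in_range(ptrdiff_t disp)
    {
        return disp - 8 < 0x02000000 && disp - 8 >= -0x02000000;
    }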
@@ -1073,9 +1054,9 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
     TCGLabel *l = &s->labels[label_index];
 
     if (l->has_value) {
-        tcg_out_goto(s, cond, l->u.value);
+        tcg_out_goto(s, cond, l->u.value_ptr);
     } else {
-        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
+        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 0);
         tcg_out_b_noaddr(s, cond);
     }
 }
@@ -1084,7 +1065,7 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[16] = {
     [MO_UB]   = helper_ret_ldub_mmu,
     [MO_SB]   = helper_ret_ldsb_mmu,
@@ -1104,7 +1085,7 @@ static const void * const qemu_ld_helpers[16] = {
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_st_helpers[16] = {
+static void * const qemu_st_helpers[16] = {
     [MO_UB]   = helper_ret_stb_mmu,
     [MO_LEUW] = helper_le_stw_mmu,
     [MO_LEUL] = helper_le_stl_mmu,
@@ -1256,7 +1237,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                 TCGReg addrhi, int mem_index,
-                                uint8_t *raddr, uint8_t *label_ptr)
+                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
@@ -1275,9 +1256,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOp opc = lb->opc;
-    uintptr_t func;
+    void *func;
 
-    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
+    reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -1292,9 +1273,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
        icache usage. For pre-armv6, use the signed helpers since we do
        not have a single insn sign-extend. */
     if (use_armv6_instructions) {
-        func = (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN];
+        func = qemu_ld_helpers[opc & ~MO_SIGN];
     } else {
-        func = (uintptr_t)qemu_ld_helpers[opc];
+        func = qemu_ld_helpers[opc];
         if (opc & MO_SIGN) {
             opc = MO_UL;
         }
@@ -1328,7 +1309,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         break;
     }
 
-    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
+    tcg_out_goto(s, COND_AL, lb->raddr);
 }
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -1336,7 +1317,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     TCGReg argreg, datalo, datahi;
     TCGMemOp opc = lb->opc;
 
-    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
+    reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
@@ -1368,7 +1349,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
 
     /* Tail-call to the helper, which will return to the fast path. */
-    tcg_out_goto(s, COND_AL, (uintptr_t)qemu_st_helpers[opc]);
+    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc]);
 }
 #endif /* SOFTMMU */
@@ -1499,7 +1480,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 #ifdef CONFIG_SOFTMMU
     int mem_index;
     TCGReg addend;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 #endif
 
     datalo = *args++;
@@ -1628,7 +1609,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #ifdef CONFIG_SOFTMMU
     int mem_index;
     TCGReg addend;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 #endif
 
     datalo = *args++;
@@ -1660,7 +1641,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #endif
 }
 
-static uint8_t *tb_ret_addr;
+static tcg_insn_unit *tb_ret_addr;
 
 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                               const TCGArg *args, const int *const_args)
@@ -1670,51 +1651,28 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_exit_tb:
-        if (use_armv7_instructions || check_fit_imm(args[0])) {
-            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
-            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
-        } else {
-            uint8_t *ld_ptr = s->code_ptr;
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
-            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
-            *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
-            tcg_out32(s, args[0]);
-        }
+        tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
+        tcg_out_goto(s, COND_AL, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_offset) {
             /* Direct jump method */
-#if defined(USE_DIRECT_JUMP)
-            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
             tcg_out_b_noaddr(s, COND_AL);
-#else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
-            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
-            tcg_out32(s, 0);
-#endif
         } else {
             /* Indirect jump method */
-#if 1
-            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
-            if (c > 0xfff || c < -0xfff) {
-                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
-                               (tcg_target_long) (s->tb_next + args[0]));
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
-            } else
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
-#else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
-            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
-#endif
+            intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
+            tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
         }
-        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
        break;
     case INDEX_op_call:
-        if (const_args[0])
-            tcg_out_call(s, args[0]);
-        else
+        if (const_args[0]) {
+            tcg_out_call(s, (void *)args[0]);
+        } else {
             tcg_out_callr(s, COND_AL, args[0]);
+        }
         break;
     case INDEX_op_br:
         tcg_out_goto_label(s, COND_AL, args[0]);
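
The rewritten indirect jump splits the absolute address of the jump slot so that the high part goes through movi32 and the low part fits LDR's 12-bit immediate. A quick standalone check that the split is lossless (the address value is made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical address of s->tb_next[args[0]]. */
        intptr_t ptr = (intptr_t)0x12345abc;

        intptr_t base = ptr & ~(intptr_t)0xfff;  /* loaded into TCG_REG_R0 */
        intptr_t off  = ptr & 0xfff;             /* LDR's 12-bit immediate */

        assert(off <= 0xfff);
        assert(base + off == ptr);
        return 0;
    }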

--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h

@@ -26,6 +26,7 @@
 #define TCG_TARGET_ARM 1
 
 #undef TCG_TARGET_STACK_GROWSUP
+#define TCG_TARGET_INSN_UNIT_SIZE 4
 
 typedef enum {
     TCG_REG_R0 = 0,
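
With TCG_TARGET_INSN_UNIT_SIZE defined as 4, common tcg code can type code pointers as 32-bit units. Presumably, following the parent commit, tcg/tcg.h maps the define onto the type along these lines (a sketch, not a verified copy of that header):

    #if TCG_TARGET_INSN_UNIT_SIZE == 1
    typedef uint8_t tcg_insn_unit;
    #elif TCG_TARGET_INSN_UNIT_SIZE == 2
    typedef uint16_t tcg_insn_unit;
    #elif TCG_TARGET_INSN_UNIT_SIZE == 4
    typedef uint32_t tcg_insn_unit;
    #elif TCG_TARGET_INSN_UNIT_SIZE == 8
    typedef uint64_t tcg_insn_unit;
    #else
    #error "unsupported insn unit size"
    #endif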