Fix libafl edges (#3)

* reverse_bits

* icount and size in edge tb

* libafl_gen_edge gen eob

* cleanup
Andrea Fioraldi 2022-02-28 17:42:08 +01:00 committed by GitHub
parent 4a4c9f5c09
commit 17c1f083ee
2 changed files with 40 additions and 98 deletions

View File

@@ -897,7 +897,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
//// --- Begin LibAFL code ---
TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
- target_ulong dst_block, target_ulong cs_base,
+ target_ulong dst_block, int exit_n, target_ulong cs_base,
uint32_t flags, int cflags);
//// --- End LibAFL code ---
@@ -1023,7 +1023,7 @@ int cpu_exec(CPUState *cpu)
if (last_tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
mmap_lock();
TranslationBlock *edge = libafl_gen_edge(cpu, last_tb->pc, tb->pc,
- cs_base, flags, cflags);
+ tb_exit, cs_base, flags, cflags);
mmap_unlock();
if (edge) {

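Taken on its own, the call-site change above means the exit slot taken out of last_tb (tb_exit, 0 or 1 for QEMU's two direct-jump slots) now travels into libafl_gen_edge, so the two outgoing edges of one source block get distinct edge TBs instead of sharing one. A minimal stand-alone C sketch of that idea follows; every type, helper name and address in it is a stand-in for illustration, not the real QEMU/libafl API.

/*
 * Sketch only: the exit slot becomes part of the edge's identity.
 * Build with: cc -o edge_callsite edge_callsite.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;          /* assumption: 64-bit guest */

struct stub_tb { target_ulong pc; };    /* stand-in for TranslationBlock */

/* stand-in for libafl_gen_edge(); only the parameters matter here */
static void gen_edge_stub(target_ulong src, target_ulong dst, int exit_n)
{
    printf("edge TB for 0x%" PRIx64 " -> 0x%" PRIx64 " via exit %d\n",
           (uint64_t)src, (uint64_t)dst, exit_n);
}

int main(void)
{
    struct stub_tb last_tb   = { .pc = 0x401000 };  /* hypothetical blocks */
    struct stub_tb not_taken = { .pc = 0x401020 };
    struct stub_tb taken     = { .pc = 0x401080 };

    /* the same source block reaches two successors through two exit slots;
     * with exit_n in the signature the two edges stay distinct */
    gen_edge_stub(last_tb.pc, not_taken.pc, 0);
    gen_edge_stub(last_tb.pc, taken.pc, 1);
    return 0;
}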
View File

@@ -70,8 +70,9 @@
void libafl_helper_table_add(TCGHelperInfo* info);
TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
- target_ulong dst_block, target_ulong cs_base,
- uint32_t flags, int cflags);
+ target_ulong dst_block, int exit_n,
+ target_ulong cs_base, uint32_t flags,
+ int cflags);
void libafl_gen_read(TCGv addr, MemOp ot);
void libafl_gen_read_N(TCGv addr, uint32_t size);
void libafl_gen_write(TCGv addr, MemOp ot);
@@ -1749,10 +1750,28 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
//// --- Begin LibAFL code ---
+ static target_ulong reverse_bits(target_ulong num)
+ {
+ unsigned int count = sizeof(num) * 8 - 1;
+ target_ulong reverse_num = num;
+ num >>= 1;
+ while(num)
+ {
+ reverse_num <<= 1;
+ reverse_num |= num & 1;
+ num >>= 1;
+ count--;
+ }
+ reverse_num <<= count;
+ return reverse_num;
+ }
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
- target_ulong dst_block, target_ulong cs_base,
- uint32_t flags, int cflags)
+ target_ulong dst_block, int exit_n,
+ target_ulong cs_base, uint32_t flags,
+ int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb;
@@ -1765,6 +1784,8 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
int64_t ti;
#endif
+ target_ulong pc = src_block ^ reverse_bits((target_ulong)exit_n);
(void)virt_page2;
(void)phys_page2;
(void)phys_pc;
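The synthetic pc of the edge TB is now src_block XOR reverse_bits(exit_n): reverse_bits() mirrors the exit index into the most significant bits, so the low address bits stay untouched while each outgoing edge of a block gets its own key. A small stand-alone sketch of that derivation (assuming a 64-bit target_ulong and a made-up block address; it is not code from this repository):

/* Build with: cc -o edge_key edge_key.c */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;          /* assumption: 64-bit guest */

/* same algorithm as the reverse_bits() added in this commit */
static target_ulong reverse_bits(target_ulong num)
{
    unsigned int count = sizeof(num) * 8 - 1;
    target_ulong reverse_num = num;
    num >>= 1;
    while (num) {
        reverse_num <<= 1;
        reverse_num |= num & 1;
        num >>= 1;
        count--;
    }
    reverse_num <<= count;
    return reverse_num;
}

int main(void)
{
    target_ulong src_block = 0x400812f0;    /* hypothetical guest block */

    for (int exit_n = 0; exit_n < 2; exit_n++) {
        target_ulong pc = src_block ^ reverse_bits((target_ulong)exit_n);
        printf("exit %d -> edge pc 0x%016" PRIx64 "\n", exit_n, (uint64_t)pc);
    }
    /* exit 0 leaves the source address unchanged (reverse_bits(0) == 0),
     * exit 1 flips only the topmost bit, so the two edges of one source
     * block no longer collapse onto the same edge TB. */
    return 0;
}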
@@ -1786,13 +1807,10 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
qemu_thread_jit_write();
//phys_pc = get_page_addr_code(env, pc);
phys_pc = get_page_addr_code(env, src_block);
+ phys_pc ^= reverse_bits((tb_page_addr_t)exit_n);
- //if (phys_pc == -1) {
- /* Generate a one-shot TB with 1 insn in it */
- //cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
- //}
- /* Generate a one-shot TB with 8 insn in it */
+ /* Generate a one-shot TB with max 8 insn in it */
cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 8;
max_insns = cflags & CF_COUNT_MASK;
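The surviving cflags line packs the instruction budget of the one-shot edge TB into the low bits of cflags. A quick stand-alone illustration of that arithmetic, with CF_COUNT_MASK and CF_LAST_IO values assumed to mirror upstream QEMU of this era rather than taken from this diff, and a hypothetical incoming cflags value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CF_COUNT_MASK 0x000001ff   /* assumed value: low bits hold the insn budget */
#define CF_LAST_IO    0x00008000   /* assumed value: last insn may be an I/O access */

int main(void)
{
    uint32_t cflags = 0x00020003;  /* hypothetical incoming cflags, budget of 3 */

    /* clear the old budget, allow at most 8 insns, flag the last one as I/O */
    cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 8;
    int max_insns = cflags & CF_COUNT_MASK;

    printf("cflags = 0x%08" PRIx32 ", max_insns = %d\n", cflags, max_insns);
    /* prints: cflags = 0x00028008, max_insns = 8 */
    return 0;
}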
@@ -1801,7 +1819,7 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
}
QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
- buffer_overflow:
+ buffer_overflow1:
tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(!tb)) {
/* flush must be done */
@@ -1814,13 +1832,12 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
- tb->pc = 0;
+ tb->pc = pc;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tcg_ctx->tb_cflags = cflags;
- tb_overflow:
#ifdef CONFIG_PROFILER
/* includes aborted translations because of exceptions */
@@ -1828,11 +1845,6 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
ti = profile_getclock();
#endif
- gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
- if (unlikely(gen_code_size != 0)) {
- goto error_return;
- }
tcg_func_start(tcg_ctx);
tcg_ctx->cpu = env_cpu(env);
@@ -1841,12 +1853,16 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
TCGTemp *tmp1[1] = { tcgv_i64_temp(tmp0) };
tcg_gen_callN(libafl_exec_edge_hook, NULL, 1, tmp1);
tcg_temp_free_i64(tmp0);
+ tcg_gen_goto_tb(0);
+ tcg_gen_exit_tb(tb, 0);
+ tb->size = 1;
+ tb->icount = 1;
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
max_insns = tb->icount;
- // trace_translate_block(tb, tb->pc, tb->tc.ptr);
+ trace_translate_block(tb, tb->pc, tb->tc.ptr);
/* generate machine code */
tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
@@ -1869,48 +1885,11 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
gen_code_size = tcg_gen_code(tcg_ctx, tb);
if (unlikely(gen_code_size < 0)) {
- error_return:
- switch (gen_code_size) {
- case -1:
- /*
- * Overflow of code_gen_buffer, or the current slice of it.
- *
- * TODO: We don't need to re-do gen_intermediate_code, nor
- * should we re-do the tcg optimization currently hidden
- * inside tcg_gen_code. All that should be required is to
- * flush the TBs, allocate a new TB, re-initialize it per
- * above, and re-do the actual code generation.
- */
- qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
- "Restarting code generation for "
- "code_gen_buffer overflow\n");
- goto buffer_overflow;
- case -2:
- /*
- * The code generated for the TranslationBlock is too large.
- * The maximum size allowed by the unwind info is 64k.
- * There may be stricter constraints from relocations
- * in the tcg backend.
- *
- * Try again with half as many insns as we attempted this time.
- * If a single insn overflows, there's a bug somewhere...
- */
- assert(max_insns > 1);
- max_insns /= 2;
- qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
- "Restarting code generation with "
- "smaller translation block (max %d insns)\n",
- max_insns);
- goto tb_overflow;
- default:
- g_assert_not_reached();
- }
+ goto buffer_overflow1;
}
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
- goto buffer_overflow;
+ goto buffer_overflow1;
}
tb->tc.size = gen_code_size;
@@ -1941,45 +1920,8 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
tb_reset_jump(tb, 1);
}
- /*
- * If the TB is not associated with a physical RAM page then
- * it must be a temporary one-insn TB, and we have nothing to do
- * except fill in the page_addr[] fields. Return early before
- * attempting to link to other TBs or add to the lookup table.
- */
- /*if (phys_pc == -1) {
- tb->page_addr[0] = tb->page_addr[1] = -1;
- return tb;
- }*/
- /*
- * Insert TB into the corresponding region tree before publishing it
- * through QHT. Otherwise rewinding happened in the TB might fail to
- * lookup itself using host PC.
- */
tcg_tb_insert(tb);
- /* check next page if needed */
- /*virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
- phys_page2 = -1;
- if ((pc & TARGET_PAGE_MASK) != virt_page2) {
- phys_page2 = get_page_addr_code(env, virt_page2);
- }*/
- /*
- * No explicit memory barrier is required -- tb_link_page() makes the
- * TB visible in a consistent state.
- */
- //existing_tb = tb_link_page(tb, phys_pc, phys_page2);
- /* if the TB already exists, discard what we just translated */
- /*if (unlikely(existing_tb != tb)) {
- uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
- orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
- qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
- tcg_tb_remove(tb);
- return existing_tb;
- }*/
return tb;
}
//// --- End LibAFL code ---