Early return from libafl edge generation if no exec hooks (#85)

* bug with edges in systemmode, not fixed yet

* timeout request only for systemmode
Romain Malmain 2024-09-20 13:42:52 +02:00 committed by GitHub
parent f58a6859f3
commit d663793952
8 changed files with 70 additions and 28 deletions
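
The first two files below interact: libafl_gen_edge() now bails out before doing any translation work when libafl_qemu_hook_edge_gen() reports that no edge hook will actually execute, and cpu_exec_loop() treats the resulting NULL edge as "chain the blocks directly". A minimal standalone sketch of that control flow, with simplified stand-in types and helpers rather than QEMU's real ones:

// Standalone model of the "early return if no exec hooks" flow added by this
// commit. All types and helpers are simplified stand-ins, not QEMU's.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct TranslationBlockModel { int dummy; } TranslationBlockModel;

// Stand-in for libafl_qemu_hook_edge_gen(): true means "no edge hook will
// ever execute for this edge", so building an edge TB would be wasted work.
static bool no_exec_hook_for_edge(unsigned long src_block, unsigned long dst_block)
{
    (void)src_block; (void)dst_block;
    return true; // pretend no exec hooks are registered
}

// Stand-in for libafl_gen_edge(): after this commit it returns NULL before
// doing any translation work when no exec hook is registered.
static TranslationBlockModel *gen_edge(unsigned long src_block, unsigned long dst_block)
{
    if (no_exec_hook_for_edge(src_block, dst_block)) {
        return NULL; // early return: skip edge TB generation entirely
    }
    static TranslationBlockModel edge_tb;
    return &edge_tb; // in QEMU this would be a freshly translated edge TB
}

int main(void)
{
    TranslationBlockModel *edge = gen_edge(0x1000, 0x2000);
    if (edge) {
        // cpu_exec_loop(): chain last_tb -> edge -> tb, then run the edge once
        // so its execution gets logged before the direct jump takes over.
        puts("edge TB generated, chaining through it");
    } else {
        // cpu_exec_loop(): no edge TB, chain last_tb -> tb directly.
        puts("no exec hooks: chaining blocks directly");
    }
    return 0;
}

The point of the early return is simply to avoid paying for edge TB generation when no registered hook would ever run on that edge.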

View File

@@ -1039,7 +1039,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
             //// --- Begin LibAFL code ---
-            int has_libafl_edge = 0;
+            bool libafl_edge_generated = false;
             TranslationBlock *edge;
             /* See if we can patch the calling TB. */
@@ -1054,7 +1054,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                     if (edge) {
                         tb_add_jump(last_tb, tb_exit, edge);
                         tb_add_jump(edge, 0, tb);
-                        has_libafl_edge = 1;
+                        libafl_edge_generated = true;
                     } else {
                         tb_add_jump(last_tb, tb_exit, tb);
                     }
@@ -1063,7 +1063,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                 }
             }
-            if (has_libafl_edge) {
+            if (libafl_edge_generated) {
                 // execute the edge to make sure to log it the first execution
                 // the edge will then jump to the translated block
                 cpu_loop_exec_tb(cpu, edge, pc, &last_tb, &tb_exit);

View File

@@ -378,11 +378,20 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
     int64_t ti;
     void *host_pc;

+    // edge hooks generation callbacks
+    // early check if edge generation should be skipped
+    bool no_exec_hook = libafl_qemu_hook_edge_gen(src_block, dst_block);
+    if (no_exec_hook) {
+        // no exec hooks to run for edges, no point in generating a TB
+        return NULL;
+    }
+
     target_ulong pc = src_block ^ reverse_bits((target_ulong)exit_n);

     assert_memory_lock();
     qemu_thread_jit_write();

+    // TODO: this (get_page_addr_code_hostp) is a bottleneck in systemmode, investigate why
     phys_pc = get_page_addr_code_hostp(env, src_block, &host_pc);
     phys_pc ^= reverse_bits((tb_page_addr_t)exit_n);
@@ -401,11 +410,6 @@ TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
     }
     QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

-    // edge hooks generation callbacks
-    bool no_exec_hook = libafl_qemu_hook_edge_gen(src_block, dst_block);
-    if (no_exec_hook)
-        return NULL;
-
 buffer_overflow:
     assert_no_pages_locked();
     tb = tcg_tb_alloc(tcg_ctx);

View File

@@ -21,15 +21,7 @@ enum libafl_exit_reason_kind {
     INTERNAL = 0,
     BREAKPOINT = 1,
     SYNC_EXIT = 2,
-};
-
-// A breakpoint has been triggered.
-struct libafl_exit_reason_breakpoint {
-    target_ulong addr;
-};
-
-// A synchronous exit has been triggered.
-struct libafl_exit_reason_sync_exit {
+    TIMEOUT = 3,
 };

 // QEMU exited on its own for some reason.
@@ -38,14 +30,26 @@ struct libafl_exit_reason_internal {
     int signal; // valid if cause == SHUTDOWN_CAUSE_HOST_SIGNAL
 };

+// A breakpoint has been triggered.
+struct libafl_exit_reason_breakpoint {
+    target_ulong addr;
+};
+
+// A synchronous exit has been triggered.
+struct libafl_exit_reason_sync_exit {};
+
+// A timeout occurred and we were asked to exit on timeout
+struct libafl_exit_reason_timeout {};
+
 struct libafl_exit_reason {
     enum libafl_exit_reason_kind kind;
     CPUState* cpu;  // CPU that triggered an exit.
     vaddr next_pc;  // The PC that should be stored in the CPU when re-entering.
     union {
-        struct libafl_exit_reason_internal internal;
+        struct libafl_exit_reason_internal internal;     // kind == INTERNAL
         struct libafl_exit_reason_breakpoint breakpoint; // kind == BREAKPOINT
         struct libafl_exit_reason_sync_exit sync_exit;   // kind == SYNC_EXIT
+        struct libafl_exit_reason_timeout timeout;       // kind == TIMEOUT
     } data;
 };
@@ -59,6 +63,11 @@ void libafl_sync_exit_cpu(void);
 void libafl_exit_request_internal(CPUState* cpu, uint64_t pc,
                                   ShutdownCause cause, int signal);
-void libafl_exit_request_sync_backdoor(CPUState* cpu, target_ulong pc);
 void libafl_exit_request_breakpoint(CPUState* cpu, target_ulong pc);
+void libafl_exit_request_sync_backdoor(CPUState* cpu, target_ulong pc);
+
+#ifndef CONFIG_USER_ONLY
+void libafl_exit_request_timeout(void);
+#endif
+
 struct libafl_exit_reason* libafl_get_exit_reason(void);

View File

@@ -81,6 +81,7 @@ static void prepare_qemu_exit(CPUState* cpu, target_ulong next_pc)
     qemu_system_debug_request();
     cpu->stopped = true; // TODO check if still needed
 #endif
+
     // in usermode, this may be called from the syscall hook, thus already out
     // of the cpu_exec but still in the cpu_loop
     if (cpu->running) {
@@ -125,6 +126,17 @@ void libafl_exit_request_breakpoint(CPUState* cpu, target_ulong pc)
     prepare_qemu_exit(cpu, pc);
 }

+#ifndef CONFIG_USER_ONLY
+void libafl_exit_request_timeout(void)
+{
+    expected_exit = true;
+
+    last_exit_reason.kind = TIMEOUT;
+    last_exit_reason.cpu = current_cpu;
+
+    qemu_system_debug_request();
+}
+#endif
+
 void libafl_qemu_trigger_breakpoint(CPUState* cpu)
 {
     CPUClass* cc = CPU_GET_CLASS(cpu);
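
The new TIMEOUT path follows the same pattern as the other exit requests: mark the exit as expected, record the reason, and kick QEMU's debug request so cpu_exec() returns to the harness. A minimal standalone model of that flow; expected_exit, current_cpu and qemu_system_debug_request() are mocked here, only the bookkeeping mirrors the diff above:

// Standalone model of the new TIMEOUT exit reason (systemmode only in the
// real code). The globals and qemu_system_debug_request() are stand-ins.
#include <stdbool.h>
#include <stdio.h>

enum exit_reason_kind { INTERNAL = 0, BREAKPOINT = 1, SYNC_EXIT = 2, TIMEOUT = 3 };

struct exit_reason {
    enum exit_reason_kind kind;
    void *cpu; // CPUState* in QEMU: the CPU that triggered the exit
};

static bool expected_exit;
static struct exit_reason last_exit_reason;
static void *current_cpu = (void *)0x1; // stand-in for QEMU's current_cpu

static void qemu_system_debug_request(void)
{
    // In QEMU this kicks the main loop so cpu_exec() returns to the harness.
    puts("debug request: leaving the emulation loop");
}

// Mirrors libafl_exit_request_timeout(): mark the upcoming exit as expected,
// record why we are exiting, then ask QEMU to stop.
static void exit_request_timeout(void)
{
    expected_exit = true;
    last_exit_reason.kind = TIMEOUT;
    last_exit_reason.cpu = current_cpu;
    qemu_system_debug_request();
}

int main(void)
{
    exit_request_timeout();
    // A harness would call libafl_get_exit_reason() here and branch on kind.
    if (expected_exit && last_exit_reason.kind == TIMEOUT) {
        puts("exit reason: TIMEOUT");
    }
    return 0;
}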

View File

@@ -71,8 +71,11 @@ void libafl_qemu_hook_block_run(target_ulong pc)
     while (hook) {
         uint64_t cur_id = 0;

-        if (hook->gen)
+        if (hook->gen) {
             cur_id = hook->gen(hook->data, pc);
+        }
+
         if (cur_id != (uint64_t)-1 && hook->helper_info.func) {
             TCGv_i64 tmp0 = tcg_constant_i64(hook->data);
             TCGv_i64 tmp1 = tcg_constant_i64(cur_id);
@@ -81,9 +84,11 @@ void libafl_qemu_hook_block_run(target_ulong pc)
             tcg_temp_free_i64(tmp0);
             tcg_temp_free_i64(tmp1);
         }
+
         if (cur_id != (uint64_t)-1 && hook->jit) {
             hook->jit(hook->data, cur_id);
         }
+
         hook = hook->next;
     }
 }

View File

@@ -56,15 +56,22 @@ bool libafl_qemu_hook_edge_gen(target_ulong src_block, target_ulong dst_block)
 {
     struct libafl_edge_hook* hook = libafl_edge_hooks;
     bool no_exec_hook = true;

     while (hook) {
         hook->cur_id = 0;

-        if (hook->gen)
+        if (hook->gen) {
             hook->cur_id = hook->gen(hook->data, src_block, dst_block);
+        }
+
         if (hook->cur_id != (uint64_t)-1 &&
-            (hook->helper_info.func || hook->jit))
+            (hook->helper_info.func || hook->jit)) {
             no_exec_hook = false;
+        }
+
         hook = hook->next;
     }

     return no_exec_hook;
 }
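
The return value of libafl_qemu_hook_edge_gen() is what drives the early return in libafl_gen_edge(): it stays true only if no hook ends up with something to execute for this edge. A standalone model of that decision; struct edge_hook below is a simplified stand-in for the real struct libafl_edge_hook:

// Standalone model of the decision made by libafl_qemu_hook_edge_gen().
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct edge_hook {
    // gen callback: returns an id for this edge, or (uint64_t)-1 to skip it
    uint64_t (*gen)(uint64_t data, uint64_t src, uint64_t dst);
    void (*helper)(void);                    // stand-in for helper_info.func
    void (*jit)(uint64_t data, uint64_t id); // jit callback
    uint64_t data;
    struct edge_hook *next;
};

// Returns true when no hook wants to execute anything for this edge, i.e.
// generating an edge TB would be pure overhead (hence the early return in
// libafl_gen_edge()).
static bool edge_has_no_exec_hook(struct edge_hook *hook, uint64_t src, uint64_t dst)
{
    bool no_exec_hook = true;
    while (hook) {
        uint64_t cur_id = 0;
        if (hook->gen) {
            cur_id = hook->gen(hook->data, src, dst);
        }
        // (uint64_t)-1 means "this hook does not care about this edge"
        if (cur_id != (uint64_t)-1 && (hook->helper || hook->jit)) {
            no_exec_hook = false;
        }
        hook = hook->next;
    }
    return no_exec_hook;
}

int main(void)
{
    // No hooks registered at all: edge TB generation can be skipped.
    printf("no hooks -> skip edge TB: %d\n", edge_has_no_exec_hook(NULL, 0x1000, 0x2000));
    return 0;
}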

View File

@@ -713,8 +713,8 @@ SyxSnapshotCheckResult syx_snapshot_check(SyxSnapshot* ref_snapshot)
 void syx_snapshot_root_restore(SyxSnapshot* snapshot)
 {
     // health check.
-    CPUState* cpu;
-    CPU_FOREACH(cpu) { assert(cpu->stopped); }
+    // CPUState* cpu;
+    // CPU_FOREACH(cpu) { assert(cpu->stopped); }

     bool must_unlock_bql = false;

View File

@@ -8,13 +8,18 @@ add_project_arguments(cc.get_supported_arguments('-Wsign-compare',
                                                  '-Wstrict-aliasing'),
                      native: false, language: 'c')

+#### --- Begin LibAFL code ---
 keyval = import('keyval')
 config_host = keyval.load(meson.global_build_root() / 'config-host.mak')
+#### --- End LibAFL code ---

 libvduse = static_library('vduse',
                           files('libvduse.c'),
                           c_args: '-D_GNU_SOURCE',
-                          pic: 'AS_SHARED_LIB' in config_host)
+#### --- Begin LibAFL code ---
+                          pic: 'AS_SHARED_LIB' in config_host
+#### --- End LibAFL code ---
+)

 libvduse_dep = declare_dependency(link_with: libvduse,
                                   include_directories: include_directories('.'))