From 668fc28b051b78f0b95e051682ddfc884d21fe35 Mon Sep 17 00:00:00 2001 From: Alwin Berger <50980804+alwinber@users.noreply.github.com> Date: Mon, 14 Nov 2022 14:23:59 +0100 Subject: [PATCH] Multiple fixes for systemmode (#13) * Systemmode: handle breakpoints natively * systemmode: buffer snapshot name fixes a bug where the name pointer becomes stale. * systemmode: allow synchronous snapshotting Add a flag to take snapshots synchronously. This should be used to take or load snapshots while the emulator is not running. Co-authored-by: Alwin Berger <50980804+alwinber@users.noreply.github.com> --- accel/tcg/tcg-runtime.c | 55 ++++++++++++++++++++++++++++++++++++----- softmmu/runstate.c | 3 +++ 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c index bc21bc34e0..95a03a590d 100644 --- a/accel/tcg/tcg-runtime.c +++ b/accel/tcg/tcg-runtime.c @@ -40,9 +40,11 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" +#include <stdlib.h> +#include <string.h> -void libafl_save_qemu_snapshot(char *name); -void libafl_load_qemu_snapshot(char *name); +void libafl_save_qemu_snapshot(char *name, bool sync); +void libafl_load_qemu_snapshot(char *name, bool sync); static void save_snapshot_cb(void* opaque) { @@ -52,11 +54,26 @@ static void save_snapshot_cb(void* opaque) error_report_err(err); error_report("Could not save snapshot"); } + free(opaque); } -void libafl_save_qemu_snapshot(char *name) +void libafl_save_qemu_snapshot(char *name, bool sync) { - aio_bh_schedule_oneshot_full(qemu_get_aio_context(), save_snapshot_cb, (void*)name, "save_snapshot"); + // use snapshots synchronously, use if main loop is not running + if (sync) { + //TODO: eliminate this code duplication + //by passing a heap-allocated buffer from rust to c, + //which c needs to free + Error *err = NULL; + if(!save_snapshot(name, true, NULL, false, NULL, &err)) { + error_report_err(err); + error_report("Could not save snapshot"); + } + return; + } + char* name_buffer = malloc(strlen(name)+1); 
+ strcpy(name_buffer, name); + aio_bh_schedule_oneshot_full(qemu_get_aio_context(), save_snapshot_cb, (void*)name_buffer, "save_snapshot"); } static void load_snapshot_cb(void* opaque) @@ -76,11 +93,33 @@ static void load_snapshot_cb(void* opaque) if (loaded && saved_vm_running) { vm_start(); } + free(opaque); } -void libafl_load_qemu_snapshot(char *name) +void libafl_load_qemu_snapshot(char *name, bool sync) { - aio_bh_schedule_oneshot_full(qemu_get_aio_context(), load_snapshot_cb, (void*)name, "load_snapshot"); + // use snapshots synchronously, use if main loop is not running + if (sync) { + //TODO: see libafl_save_qemu_snapshot + Error *err = NULL; + + int saved_vm_running = runstate_is_running(); + vm_stop(RUN_STATE_RESTORE_VM); + + bool loaded = load_snapshot(name, NULL, false, NULL, &err); + + if(!loaded) { + error_report_err(err); + error_report("Could not load snapshot"); + } + if (loaded && saved_vm_running) { + vm_start(); + } + return; + } + char* name_buffer = malloc(strlen(name)+1); + strcpy(name_buffer, name); + aio_bh_schedule_oneshot_full(qemu_get_aio_context(), load_snapshot_cb, (void*)name_buffer, "load_snapshot"); } #endif @@ -93,12 +132,16 @@ void libafl_qemu_trigger_breakpoint(CPUState* cpu); void libafl_qemu_trigger_breakpoint(CPUState* cpu) { +#ifndef CONFIG_USER_ONLY + qemu_system_debug_request(); +#else if (cpu->running) { cpu->exception_index = EXCP_LIBAFL_BP; cpu_loop_exit(cpu); } else { libafl_qemu_break_asap = 1; } +#endif } void HELPER(libafl_qemu_handle_breakpoint)(CPUArchState *env) diff --git a/softmmu/runstate.c b/softmmu/runstate.c index 1e68680b9d..84102e4c3e 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -670,6 +670,9 @@ static bool main_loop_should_exit(int *status) if (qemu_debug_requested()) { vm_stop(RUN_STATE_DEBUG); +//// --- Begin LibAFL code --- + return true; // exit back to fuzzing harness +//// --- End LibAFL code --- } if (qemu_suspend_requested()) { qemu_system_suspend();