add libafl_qemu_read_user_sp_unchecked

Alwin Berger 2024-06-14 13:54:57 +02:00
parent b006000ced
commit acc2e70812


@@ -28,6 +28,346 @@
#include "exec/log.h"
#include "accel/accel-cpu-target.h"
#include "trace/trace-root.h"
#include "qemu/accel.h"
//// --- Begin LibAFL code ---

#ifndef CONFIG_USER_ONLY
#include "libafl/syx-snapshot/device-save.h"
#endif

void libafl_flush_jit(void);

extern int libafl_restoring_devices;

/*
void* libafl_qemu_g2h(CPUState *cpu, target_ulong x);
target_ulong libafl_qemu_h2g(CPUState *cpu, void* x);

void* libafl_qemu_g2h(CPUState *cpu, target_ulong x)
{
    return g2h(cpu, x);
}

target_ulong libafl_qemu_h2g(CPUState *cpu, void* x)
{
    return h2g(cpu, x);
}
*/

target_ulong libafl_page_from_addr(target_ulong addr) {
    return addr & TARGET_PAGE_MASK;
}

CPUState* libafl_qemu_get_cpu(int cpu_index)
{
    CPUState *cpu;
    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == cpu_index)
            return cpu;
    }
    return NULL;
}

int libafl_qemu_num_cpus(void)
{
    CPUState *cpu;
    int num = 0;
    CPU_FOREACH(cpu) {
        num++;
    }
    return num;
}

CPUState* libafl_qemu_current_cpu(void)
{
#ifndef CONFIG_USER_ONLY
    if (current_cpu == NULL) {
        return libafl_last_exit_cpu();
    }
#endif
    return current_cpu;
}

int libafl_qemu_cpu_index(CPUState* cpu)
{
    if (cpu) return cpu->cpu_index;
    return -1;
}

int libafl_qemu_write_reg(CPUState* cpu, int reg, uint8_t* val)
{
    return gdb_write_register(cpu, val, reg);
}

int libafl_qemu_read_reg(CPUState* cpu, int reg, uint8_t* val)
{
    int len;
    if (libafl_qemu_mem_buf == NULL) {
        libafl_qemu_mem_buf = g_byte_array_sized_new(64);
    }
    g_byte_array_set_size(libafl_qemu_mem_buf, 0);

    len = gdb_read_register(cpu, libafl_qemu_mem_buf, reg);

    if (len > 0) {
        memcpy(val, libafl_qemu_mem_buf->data, len);
    }

    return len;
}
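
/*
 * Illustrative sketch, not part of this change: reading a single core
 * register through the gdb stub into a caller-provided buffer. The register
 * index 0 and the buffer size are assumptions made purely for illustration.
 *
 *     uint8_t buf[64] = { 0 };   // sized for a general-purpose register
 *     CPUState *cpu = libafl_qemu_current_cpu();
 *     int len = cpu ? libafl_qemu_read_reg(cpu, 0, buf) : 0;
 *     if (len > 0) {
 *         // buf[0..len) now holds the raw register bytes in gdb/target order
 *     }
 */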

int libafl_qemu_num_regs(CPUState* cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    return cc->gdb_num_core_regs;
}

#ifdef TARGET_ARM
// use-case: get the user stack pointer (and, via the exception frame, the return address) at an ISR return
#include "target/arm/cpu.h"
#include "target/arm/internals.h"

int libafl_qemu_read_user_sp_unchecked(CPUState* cpu);
int libafl_qemu_read_user_sp_unchecked(CPUState* cpu) {
    CPUARMState *env = cpu_env(cpu);
    return env->v7m.other_sp;
}
#endif
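
/*
 * Illustrative sketch, not part of this change: a hypothetical caller that
 * reads the banked user stack pointer of the current vCPU, e.g. from a hook
 * that fires on an ISR return; the exception frame at that address is where
 * the return address would then be looked up.
 *
 *     #ifdef TARGET_ARM
 *     CPUState *cpu = libafl_qemu_current_cpu();
 *     if (cpu) {
 *         uint32_t user_sp = (uint32_t)libafl_qemu_read_user_sp_unchecked(cpu);
 *         // walk the exception frame starting at user_sp as needed
 *     }
 *     #endif
 */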

#ifndef CONFIG_USER_ONLY
hwaddr libafl_qemu_current_paging_id(CPUState* cpu)
{
    CPUClass* cc = CPU_GET_CLASS(cpu);

    if (cc->sysemu_ops && cc->sysemu_ops->get_paging_id) {
        return cc->sysemu_ops->get_paging_id(cpu);
    } else {
        return 0;
    }
}
#endif
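
/*
 * Illustrative sketch, not part of this change: querying the current paging
 * id (whatever the target's get_paging_id hook exposes, e.g. a page table
 * base register) to tell guest address spaces apart; a result of 0 means the
 * target does not provide one.
 *
 *     CPUState *cpu = libafl_qemu_current_cpu();
 *     hwaddr paging_id = cpu ? libafl_qemu_current_paging_id(cpu) : 0;
 */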

void libafl_flush_jit(void)
{
    CPUState *cpu;
    CPU_FOREACH(cpu) {
        tb_flush(cpu);
    }
}

//// --- End LibAFL code ---

#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    //tb_flush(cpu);
    //// --- Begin LibAFL code ---
    // flushing the TBs every restore makes it really slow
    // TODO handle writes to X code with specific calls to tb_invalidate_phys_addr
    if (!libafl_devices_is_restoring()) {
        tb_flush(cpu);
    }
    //// --- End LibAFL code ---

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    /* cache the cpu class for the hotpath */
    cpu->cc = CPU_GET_CLASS(cpu);

    if (!accel_cpu_common_realize(cpu, errp)) {
        return false;
    }

    /* Wait until cpu initialization complete before exposing cpu. */
    cpu_list_add(cpu);

#ifdef CONFIG_USER_ONLY
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */

    return true;
}

void cpu_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#endif

    cpu_list_remove(cpu);
    /*
     * Now that the vCPU has been removed from the RCU list, we can call
     * accel_cpu_common_unrealize, which may free fields using call_rcu.
     */
    accel_cpu_common_unrealize(cpu);
}

/*
 * This can't go in hw/core/cpu.c because that file is compiled only
 * once for both user-mode and system builds.
 */
static Property cpu_common_props[] = {
#ifdef CONFIG_USER_ONLY
    /*
     * Create a property for the user-only object, so users can
     * adjust prctl(PR_SET_UNALIGN) from the command-line.
     * Has no effect if the target does not support the feature.
     */
    DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
                     prctl_unalign_sigbus, false),
#else
    /*
     * Create a memory property for system CPU object, so users can
     * wire up its memory. The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

#ifndef CONFIG_USER_ONLY
static bool cpu_get_start_powered_off(Object *obj, Error **errp)
{
    CPUState *cpu = CPU(obj);
    return cpu->start_powered_off;
}

static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
{
    CPUState *cpu = CPU(obj);
    cpu->start_powered_off = value;
}
#endif

void cpu_class_init_props(DeviceClass *dc)
{
#ifndef CONFIG_USER_ONLY
    ObjectClass *oc = OBJECT_CLASS(dc);

    /*
     * We can't use DEFINE_PROP_BOOL in the Property array for this
     * property, because we want this to be settable after realize.
     */
    object_class_property_add_bool(oc, "start-powered-off",
                                   cpu_get_start_powered_off,
                                   cpu_set_start_powered_off);
#endif

    device_class_set_props(dc, cpu_common_props);
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}
char *cpu_model_from_type(const char *typename)
{